problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64 271-2.05k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_34001 | rasdani/github-patches | git_diff | kornia__kornia-2526 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Kornia resize in ResizePreprocessor in ObjectDetector
Use Kornia resize in ResizePreprocessor
_Originally posted by @edgarriba in https://github.com/kornia/kornia/pull/2363#discussion_r1257304346_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/contrib/object_detection.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any
4
5 import torch
6 import torch.nn.functional as F
7
8 from kornia.core import Module, Tensor, concatenate
9
10
11 class ResizePreProcessor(Module):
12 """This module resizes a list of image tensors to the given size, and also returns the original image sizes for
13 further post-processing."""
14
15 def __init__(self, size: int | tuple[int, int], interpolation_mode: str = "bilinear") -> None:
16 """
17 Args:
18 size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as
19 (height, width). If an integer is given, images will be resized to a square.
20 interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,
21 ``bicubic``, ``area``, and ``nearest-exact``.
22 """
23 super().__init__()
24 self.size = size
25 self.interpolation_mode = interpolation_mode
26
27 def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:
28 # TODO: support other input formats e.g. file path, numpy
29 # NOTE: antialias=False is used in F.interpolate()
30 original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]
31 resized_imgs = [F.interpolate(img.unsqueeze(0), self.size, mode=self.interpolation_mode) for img in imgs]
32 return concatenate(resized_imgs), {"original_size": original_sizes}
33
34
35 class ObjectDetector:
36 """This class wraps an object detection model and performs pre-processing and post-processing."""
37
38 def __init__(self, model: Module, pre_processor: Module, post_processor: Module) -> None:
39 """Construct an Object Detector object.
40
41 Args:
42 model: an object detection model.
43 pre_processor: a pre-processing module
44 post_processor: a post-processing module.
45 """
46 super().__init__()
47 self.model = model.eval()
48 self.pre_processor = pre_processor.eval()
49 self.post_processor = post_processor.eval()
50
51 @torch.inference_mode()
52 def predict(self, imgs: list[Tensor]) -> list[Tensor]:
53 """Detect objects in a given list of images.
54
55 Args:
56 imgs: list of RGB images. Each image is a Tensor with shape :math:`(3, H, W)`.
57
58 Returns:
59 list of detections found in each image. For item in a batch, shape is :math:`(D, 6)`, where :math:`D` is the
60 number of detections in the given image, :math:`6` represents class id, score, and `xywh` bounding box.
61 """
62 imgs, meta = self.pre_processor(imgs)
63 out = self.model(imgs)
64 detections = self.post_processor(out, meta)
65 return detections
66
67 def compile(
68 self,
69 *,
70 fullgraph: bool = False,
71 dynamic: bool = False,
72 backend: str = 'inductor',
73 mode: str | None = None,
74 options: dict[str, str | int | bool] | None = None,
75 disable: bool = False,
76 ) -> None:
77 """Compile the internal object detection model with :py:func:`torch.compile()`."""
78 self.model = torch.compile( # type: ignore
79 self.model,
80 fullgraph=fullgraph,
81 dynamic=dynamic,
82 backend=backend,
83 mode=mode,
84 options=options,
85 disable=disable,
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/contrib/object_detection.py b/kornia/contrib/object_detection.py
--- a/kornia/contrib/object_detection.py
+++ b/kornia/contrib/object_detection.py
@@ -3,32 +3,32 @@
from typing import Any
import torch
-import torch.nn.functional as F
from kornia.core import Module, Tensor, concatenate
+from kornia.geometry.transform import Resize
class ResizePreProcessor(Module):
"""This module resizes a list of image tensors to the given size, and also returns the original image sizes for
further post-processing."""
- def __init__(self, size: int | tuple[int, int], interpolation_mode: str = "bilinear") -> None:
+ def __init__(self, size: tuple[int, int], interpolation_mode: str = "bilinear") -> None:
"""
Args:
size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as
- (height, width). If an integer is given, images will be resized to a square.
+ (height, width).
interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,
``bicubic``, ``area``, and ``nearest-exact``.
"""
super().__init__()
self.size = size
- self.interpolation_mode = interpolation_mode
+ self.resizer = Resize(self.size, interpolation=interpolation_mode)
def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:
# TODO: support other input formats e.g. file path, numpy
# NOTE: antialias=False is used in F.interpolate()
original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]
- resized_imgs = [F.interpolate(img.unsqueeze(0), self.size, mode=self.interpolation_mode) for img in imgs]
+ resized_imgs = [self.resizer(img.unsqueeze(0)) for img in imgs]
return concatenate(resized_imgs), {"original_size": original_sizes}
| {"golden_diff": "diff --git a/kornia/contrib/object_detection.py b/kornia/contrib/object_detection.py\n--- a/kornia/contrib/object_detection.py\n+++ b/kornia/contrib/object_detection.py\n@@ -3,32 +3,32 @@\n from typing import Any\n \n import torch\n-import torch.nn.functional as F\n \n from kornia.core import Module, Tensor, concatenate\n+from kornia.geometry.transform import Resize\n \n \n class ResizePreProcessor(Module):\n \"\"\"This module resizes a list of image tensors to the given size, and also returns the original image sizes for\n further post-processing.\"\"\"\n \n- def __init__(self, size: int | tuple[int, int], interpolation_mode: str = \"bilinear\") -> None:\n+ def __init__(self, size: tuple[int, int], interpolation_mode: str = \"bilinear\") -> None:\n \"\"\"\n Args:\n size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as\n- (height, width). If an integer is given, images will be resized to a square.\n+ (height, width).\n interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,\n ``bicubic``, ``area``, and ``nearest-exact``.\n \"\"\"\n super().__init__()\n self.size = size\n- self.interpolation_mode = interpolation_mode\n+ self.resizer = Resize(self.size, interpolation=interpolation_mode)\n \n def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:\n # TODO: support other input formats e.g. file path, numpy\n # NOTE: antialias=False is used in F.interpolate()\n original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]\n- resized_imgs = [F.interpolate(img.unsqueeze(0), self.size, mode=self.interpolation_mode) for img in imgs]\n+ resized_imgs = [self.resizer(img.unsqueeze(0)) for img in imgs]\n return concatenate(resized_imgs), {\"original_size\": original_sizes}\n", "issue": "Use Kornia resize in ResizePreprocessor in ObjectDetector\n Use Kornia resize in ResizePreprocessor\r\n\r\n_Originally posted by @edgarriba in https://github.com/kornia/kornia/pull/2363#discussion_r1257304346_\r\n \n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nimport torch\nimport torch.nn.functional as F\n\nfrom kornia.core import Module, Tensor, concatenate\n\n\nclass ResizePreProcessor(Module):\n \"\"\"This module resizes a list of image tensors to the given size, and also returns the original image sizes for\n further post-processing.\"\"\"\n\n def __init__(self, size: int | tuple[int, int], interpolation_mode: str = \"bilinear\") -> None:\n \"\"\"\n Args:\n size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as\n (height, width). If an integer is given, images will be resized to a square.\n interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,\n ``bicubic``, ``area``, and ``nearest-exact``.\n \"\"\"\n super().__init__()\n self.size = size\n self.interpolation_mode = interpolation_mode\n\n def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:\n # TODO: support other input formats e.g. 
file path, numpy\n # NOTE: antialias=False is used in F.interpolate()\n original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]\n resized_imgs = [F.interpolate(img.unsqueeze(0), self.size, mode=self.interpolation_mode) for img in imgs]\n return concatenate(resized_imgs), {\"original_size\": original_sizes}\n\n\nclass ObjectDetector:\n \"\"\"This class wraps an object detection model and performs pre-processing and post-processing.\"\"\"\n\n def __init__(self, model: Module, pre_processor: Module, post_processor: Module) -> None:\n \"\"\"Construct an Object Detector object.\n\n Args:\n model: an object detection model.\n pre_processor: a pre-processing module\n post_processor: a post-processing module.\n \"\"\"\n super().__init__()\n self.model = model.eval()\n self.pre_processor = pre_processor.eval()\n self.post_processor = post_processor.eval()\n\n @torch.inference_mode()\n def predict(self, imgs: list[Tensor]) -> list[Tensor]:\n \"\"\"Detect objects in a given list of images.\n\n Args:\n imgs: list of RGB images. Each image is a Tensor with shape :math:`(3, H, W)`.\n\n Returns:\n list of detections found in each image. For item in a batch, shape is :math:`(D, 6)`, where :math:`D` is the\n number of detections in the given image, :math:`6` represents class id, score, and `xywh` bounding box.\n \"\"\"\n imgs, meta = self.pre_processor(imgs)\n out = self.model(imgs)\n detections = self.post_processor(out, meta)\n return detections\n\n def compile(\n self,\n *,\n fullgraph: bool = False,\n dynamic: bool = False,\n backend: str = 'inductor',\n mode: str | None = None,\n options: dict[str, str | int | bool] | None = None,\n disable: bool = False,\n ) -> None:\n \"\"\"Compile the internal object detection model with :py:func:`torch.compile()`.\"\"\"\n self.model = torch.compile( # type: ignore\n self.model,\n fullgraph=fullgraph,\n dynamic=dynamic,\n backend=backend,\n mode=mode,\n options=options,\n disable=disable,\n )\n", "path": "kornia/contrib/object_detection.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nimport torch\n\nfrom kornia.core import Module, Tensor, concatenate\nfrom kornia.geometry.transform import Resize\n\n\nclass ResizePreProcessor(Module):\n \"\"\"This module resizes a list of image tensors to the given size, and also returns the original image sizes for\n further post-processing.\"\"\"\n\n def __init__(self, size: tuple[int, int], interpolation_mode: str = \"bilinear\") -> None:\n \"\"\"\n Args:\n size: images will be resized to this value. If a 2-integer tuple is given, it is interpreted as\n (height, width).\n interpolation_mode: interpolation mode for image resizing. Supported values: ``nearest``, ``bilinear``,\n ``bicubic``, ``area``, and ``nearest-exact``.\n \"\"\"\n super().__init__()\n self.size = size\n self.resizer = Resize(self.size, interpolation=interpolation_mode)\n\n def forward(self, imgs: list[Tensor]) -> tuple[Tensor, dict[str, Any]]:\n # TODO: support other input formats e.g. 
file path, numpy\n # NOTE: antialias=False is used in F.interpolate()\n original_sizes = [(img.shape[1], img.shape[2]) for img in imgs]\n resized_imgs = [self.resizer(img.unsqueeze(0)) for img in imgs]\n return concatenate(resized_imgs), {\"original_size\": original_sizes}\n\n\nclass ObjectDetector:\n \"\"\"This class wraps an object detection model and performs pre-processing and post-processing.\"\"\"\n\n def __init__(self, model: Module, pre_processor: Module, post_processor: Module) -> None:\n \"\"\"Construct an Object Detector object.\n\n Args:\n model: an object detection model.\n pre_processor: a pre-processing module\n post_processor: a post-processing module.\n \"\"\"\n super().__init__()\n self.model = model.eval()\n self.pre_processor = pre_processor.eval()\n self.post_processor = post_processor.eval()\n\n @torch.inference_mode()\n def predict(self, imgs: list[Tensor]) -> list[Tensor]:\n \"\"\"Detect objects in a given list of images.\n\n Args:\n imgs: list of RGB images. Each image is a Tensor with shape :math:`(3, H, W)`.\n\n Returns:\n list of detections found in each image. For item in a batch, shape is :math:`(D, 6)`, where :math:`D` is the\n number of detections in the given image, :math:`6` represents class id, score, and `xywh` bounding box.\n \"\"\"\n imgs, meta = self.pre_processor(imgs)\n out = self.model(imgs)\n detections = self.post_processor(out, meta)\n return detections\n\n def compile(\n self,\n *,\n fullgraph: bool = False,\n dynamic: bool = False,\n backend: str = 'inductor',\n mode: str | None = None,\n options: dict[str, str | int | bool] | None = None,\n disable: bool = False,\n ) -> None:\n \"\"\"Compile the internal object detection model with :py:func:`torch.compile()`.\"\"\"\n self.model = torch.compile( # type: ignore\n self.model,\n fullgraph=fullgraph,\n dynamic=dynamic,\n backend=backend,\n mode=mode,\n options=options,\n disable=disable,\n )\n", "path": "kornia/contrib/object_detection.py"}]} | 1,242 | 452 |
gh_patches_debug_5800 | rasdani/github-patches | git_diff | pymeasure__pymeasure-340 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.9 compatibility
Python 3.9 [is out](https://docs.python.org/3.9/whatsnew/3.9.html). We should ensure that we are compatible, so there are a couple of things to do
* [x] Create a fresh Python 3.9 environment.yml (with current package versions)
* [x] Update Travis and Appveyor CI setup files
- [x] Check if the Appveyor 3.8 build can use the 3.8 miniconda, not 3.7, now
- [x] I think we should relax the python version specifiers in the environment.yml to major.minor (i.e. python 3.8, not 3.8.1), to also get python bugfixes, even though it's a bit less strict in CI version stability.
- [x] Check if we should bump Trais ubuntu version to the current LTS focal (20.04)
* The conda-forge package is repackaged automatically, apparently - it's already available.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2020 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25
26 from setuptools import setup, find_packages
27
28 setup(
29 name='PyMeasure',
30 version='0.8.0',
31 author='PyMeasure Developers',
32 packages=find_packages(),
33 scripts=[],
34 url='https://github.com/ralph-group/pymeasure',
35 download_url='https://github.com/ralph-group/pymeasure/tarball/v0.8.0',
36 license='MIT License',
37 description='Scientific measurement library for instruments, experiments, and live-plotting',
38 long_description=open('README.rst').read() + "\n\n" + open('CHANGES.txt').read(),
39 install_requires=[
40 "numpy >= 1.6.1",
41 "pandas >= 0.14",
42 "pyvisa >= 1.8",
43 "pyserial >= 2.7",
44 "pyqtgraph >= 0.9.10"
45 ],
46 extras_require={
47 'matplotlib': ['matplotlib >= 2.0.2'],
48 'tcp': [
49 'zmq >= 16.0.2',
50 'cloudpickle >= 0.3.1'
51 ],
52 'python-vxi11': ['python-vxi11 >= 0.9']
53 },
54 setup_requires=[
55 'pytest-runner'
56 ],
57 tests_require=[
58 'pytest >= 2.9.1',
59 'pytest-qt >= 2.4.0'
60 ],
61 classifiers=[
62 "Development Status :: 4 - Beta",
63 "Intended Audience :: Science/Research",
64 "License :: OSI Approved :: MIT License",
65 "Operating System :: MacOS",
66 "Operating System :: Microsoft :: Windows",
67 "Operating System :: POSIX",
68 "Operating System :: Unix",
69 "Programming Language :: Python :: 3 :: Only",
70 "Programming Language :: Python :: 3.6",
71 "Programming Language :: Python :: 3.7",
72 "Programming Language :: Python :: 3.8",
73 "Topic :: Scientific/Engineering",
74 ],
75 keywords="measure instrument experiment control automate graph plot"
76 )
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -70,6 +70,7 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
],
keywords="measure instrument experiment control automate graph plot"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -70,6 +70,7 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n", "issue": "Python 3.9 compatibility\nPython 3.9 [is out](https://docs.python.org/3.9/whatsnew/3.9.html). We should ensure that we are compatible, so there are a couple of things to do\r\n\r\n* [x] Create a fresh Python 3.9 environment.yml (with current package versions)\r\n* [x] Update Travis and Appveyor CI setup files\r\n - [x] Check if the Appveyor 3.8 build can use the 3.8 miniconda, not 3.7, now\r\n - [x] I think we should relax the python version specifiers in the environment.yml to major.minor (i.e. python 3.8, not 3.8.1), to also get python bugfixes, even though it's a bit less strict in CI version stability.\r\n - [x] Check if we should bump Trais ubuntu version to the current LTS focal (20.04)\r\n* The conda-forge package is repackaged automatically, apparently - it's already available.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='PyMeasure',\n version='0.8.0',\n author='PyMeasure Developers',\n packages=find_packages(),\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url='https://github.com/ralph-group/pymeasure/tarball/v0.8.0',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n extras_require={\n 'matplotlib': ['matplotlib >= 2.0.2'],\n 'tcp': [\n 'zmq >= 16.0.2',\n 'cloudpickle >= 0.3.1'\n ],\n 'python-vxi11': ['python-vxi11 >= 0.9']\n },\n setup_requires=[\n 'pytest-runner'\n ],\n tests_require=[\n 'pytest >= 2.9.1',\n 'pytest-qt >= 2.4.0'\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='PyMeasure',\n version='0.8.0',\n author='PyMeasure Developers',\n packages=find_packages(),\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url='https://github.com/ralph-group/pymeasure/tarball/v0.8.0',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n extras_require={\n 'matplotlib': ['matplotlib >= 2.0.2'],\n 'tcp': [\n 'zmq >= 16.0.2',\n 'cloudpickle >= 0.3.1'\n ],\n 'python-vxi11': ['python-vxi11 >= 0.9']\n },\n setup_requires=[\n 'pytest-runner'\n ],\n tests_require=[\n 'pytest >= 2.9.1',\n 'pytest-qt >= 2.4.0'\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}]} | 1,323 | 102 |
gh_patches_debug_57452 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Model up yaml configs from the DB
Various yaml configs are stored in the database, including the one we'll most likely want to imspect, which is the vmdb config.
The schema, for our purposes, is two fields in the `configurations` table, `typ` and `settings`. The interface that I'm leaning toward is configurations[typ] = dictified_yaml(settings), if that makes sense.
So, for example, if we wanted to see whether or not to get a list of public images from ec2, the lookup would be `configurations['vmdb']['ems_refresh']['ec2']['get_public_images']`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fixtures/cfmedb.py`
Content:
```
1 '''
2
3
4 Created on Jun 14, 2013
5
6 @author: bcrochet
7
8 '''
9 # -*- coding: utf-8 -*-
10 # pylint: disable=C0103
11 # pylint: disable=E1101
12 import pytest
13 from urlparse import urlparse
14 from sqlalchemy import create_engine
15 from sqlalchemy.orm import sessionmaker
16 import ConfigParser
17
18 def pytest_addoption(parser):
19 '''Create the options for py.test'''
20 config = ConfigParser.ConfigParser(defaults={
21 'cfmedburl': ''
22 })
23 config.read('cfme.cfg')
24
25 group = parser.getgroup('cfme', 'cfme')
26 group.addoption('--cfmedburl',
27 action='store',
28 dest='cfme_db_url',
29 default=config.get('DEFAULT', 'cfmedburl'),
30 metavar='url',
31 help='url for CFME database to connect to')
32
33 def pytest_sessionstart(session):
34 '''Setup run for tests'''
35 import db
36 db.cfme_db_url = session.config.option.cfme_db_url
37 if not db.cfme_db_url:
38 # Let's try to figure it out
39 baseurl = session.config.option.base_url
40 baseip = urlparse(baseurl).hostname
41 db.cfme_db_url = "postgres://root:smartvm@%s:5432/vmdb_production" \
42 % baseip
43 db.engine = create_engine(db.cfme_db_url)
44
45 @pytest.fixture
46 def db_session():
47 '''Creates a database session based on the db url passed on the CLI
48
49 Usage example:
50
51 This is a SQLalchemy (http://www.sqlalchemy.org/) session. You can make
52 queries and create new rows in the database with this session.
53
54 The available classes are dynamically generated from the database. Consult
55 db/__init__.py for a list of available class -> table mappings.
56
57 An example test:
58
59 @pytest.mark.nondestructive
60 def test_that_tries_for_db(db_session):
61 import db
62 session = db_session
63 for instance in session.query(db.ExtManagementSystem).order_by(
64 db.ExtManagementSystem.id):
65 print instance.name, instance.hostname
66
67 This 'test' prints the management systems from the database.
68 '''
69 import db
70 Session = sessionmaker(bind=db.engine)
71 return Session()
72
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fixtures/cfmedb.py b/fixtures/cfmedb.py
--- a/fixtures/cfmedb.py
+++ b/fixtures/cfmedb.py
@@ -70,3 +70,13 @@
Session = sessionmaker(bind=db.engine)
return Session()
[email protected]
+def db_yamls(db_session):
+ '''Returns the yamls from the db configuration table as a dict'''
+
+ import db
+ import yaml
+ configs = db_session.query(db.Configuration.typ, db.Configuration.settings)
+ data = {name: yaml.load(settings) for name, settings in configs}
+
+ return data
| {"golden_diff": "diff --git a/fixtures/cfmedb.py b/fixtures/cfmedb.py\n--- a/fixtures/cfmedb.py\n+++ b/fixtures/cfmedb.py\n@@ -70,3 +70,13 @@\n Session = sessionmaker(bind=db.engine)\n return Session()\n \[email protected]\n+def db_yamls(db_session):\n+ '''Returns the yamls from the db configuration table as a dict'''\n+\n+ import db\n+ import yaml\n+ configs = db_session.query(db.Configuration.typ, db.Configuration.settings)\n+ data = {name: yaml.load(settings) for name, settings in configs}\n+\n+ return data\n", "issue": "Model up yaml configs from the DB\nVarious yaml configs are stored in the database, including the one we'll most likely want to imspect, which is the vmdb config.\n\nThe schema, for our purposes, is two fields in the `configurations` table, `typ` and `settings`. The interface that I'm leaning toward is configurations[typ] = dictified_yaml(settings), if that makes sense. \n\nSo, for example, if we wanted to see whether or not to get a list of public images from ec2, the lookup would be `configurations['vmdb']['ems_refresh']['ec2']['get_public_images']`\n\n", "before_files": [{"content": "'''\n\n\nCreated on Jun 14, 2013\n\n@author: bcrochet\n\n'''\n# -*- coding: utf-8 -*-\n# pylint: disable=C0103\n# pylint: disable=E1101\nimport pytest\nfrom urlparse import urlparse\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport ConfigParser\n\ndef pytest_addoption(parser):\n '''Create the options for py.test'''\n config = ConfigParser.ConfigParser(defaults={\n 'cfmedburl': ''\n })\n config.read('cfme.cfg')\n\n group = parser.getgroup('cfme', 'cfme')\n group.addoption('--cfmedburl',\n action='store',\n dest='cfme_db_url',\n default=config.get('DEFAULT', 'cfmedburl'),\n metavar='url',\n help='url for CFME database to connect to')\n\ndef pytest_sessionstart(session):\n '''Setup run for tests'''\n import db\n db.cfme_db_url = session.config.option.cfme_db_url\n if not db.cfme_db_url:\n # Let's try to figure it out\n baseurl = session.config.option.base_url\n baseip = urlparse(baseurl).hostname\n db.cfme_db_url = \"postgres://root:smartvm@%s:5432/vmdb_production\" \\\n % baseip\n db.engine = create_engine(db.cfme_db_url)\n\[email protected]\ndef db_session():\n '''Creates a database session based on the db url passed on the CLI\n\n Usage example:\n\n This is a SQLalchemy (http://www.sqlalchemy.org/) session. You can make\n queries and create new rows in the database with this session.\n\n The available classes are dynamically generated from the database. 
Consult\n db/__init__.py for a list of available class -> table mappings.\n\n An example test:\n\n @pytest.mark.nondestructive\n def test_that_tries_for_db(db_session):\n import db\n session = db_session\n for instance in session.query(db.ExtManagementSystem).order_by(\n db.ExtManagementSystem.id):\n print instance.name, instance.hostname\n\n This 'test' prints the management systems from the database.\n '''\n import db\n Session = sessionmaker(bind=db.engine)\n return Session()\n\n", "path": "fixtures/cfmedb.py"}], "after_files": [{"content": "'''\n\n\nCreated on Jun 14, 2013\n\n@author: bcrochet\n\n'''\n# -*- coding: utf-8 -*-\n# pylint: disable=C0103\n# pylint: disable=E1101\nimport pytest\nfrom urlparse import urlparse\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport ConfigParser\n\ndef pytest_addoption(parser):\n '''Create the options for py.test'''\n config = ConfigParser.ConfigParser(defaults={\n 'cfmedburl': ''\n })\n config.read('cfme.cfg')\n\n group = parser.getgroup('cfme', 'cfme')\n group.addoption('--cfmedburl',\n action='store',\n dest='cfme_db_url',\n default=config.get('DEFAULT', 'cfmedburl'),\n metavar='url',\n help='url for CFME database to connect to')\n\ndef pytest_sessionstart(session):\n '''Setup run for tests'''\n import db\n db.cfme_db_url = session.config.option.cfme_db_url\n if not db.cfme_db_url:\n # Let's try to figure it out\n baseurl = session.config.option.base_url\n baseip = urlparse(baseurl).hostname\n db.cfme_db_url = \"postgres://root:smartvm@%s:5432/vmdb_production\" \\\n % baseip\n db.engine = create_engine(db.cfme_db_url)\n\[email protected]\ndef db_session():\n '''Creates a database session based on the db url passed on the CLI\n\n Usage example:\n\n This is a SQLalchemy (http://www.sqlalchemy.org/) session. You can make\n queries and create new rows in the database with this session.\n\n The available classes are dynamically generated from the database. Consult\n db/__init__.py for a list of available class -> table mappings.\n\n An example test:\n\n @pytest.mark.nondestructive\n def test_that_tries_for_db(db_session):\n import db\n session = db_session\n for instance in session.query(db.ExtManagementSystem).order_by(\n db.ExtManagementSystem.id):\n print instance.name, instance.hostname\n\n This 'test' prints the management systems from the database.\n '''\n import db\n Session = sessionmaker(bind=db.engine)\n return Session()\n\[email protected]\ndef db_yamls(db_session):\n '''Returns the yamls from the db configuration table as a dict'''\n\n import db\n import yaml\n configs = db_session.query(db.Configuration.typ, db.Configuration.settings)\n data = {name: yaml.load(settings) for name, settings in configs}\n\n return data\n", "path": "fixtures/cfmedb.py"}]} | 1,030 | 144 |
gh_patches_debug_21975 | rasdani/github-patches | git_diff | getpelican__pelican-1515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Built-in server relies on file name suffix to guess Content-Type
Pelican's built-in web server relies on the standard `SimpleHTTPServer` module to guess the appropriate `Content-Type` header for the files it serves. Sadly, that implementation relies on file name suffix to make its guesses. When I configure my site to use URLs without suffixes...
```
'PAGE_URL': 'pages/{slug}'
```
...the server sends `Content-Type: application/octet-stream`, and my browser refuses to render the HTML.
This could be better, at least on systems that have the python-magic package installed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/server.py`
Content:
```
1 from __future__ import print_function
2 import os
3 import sys
4 import logging
5 try:
6 import SimpleHTTPServer as srvmod
7 except ImportError:
8 import http.server as srvmod # NOQA
9
10 try:
11 import SocketServer as socketserver
12 except ImportError:
13 import socketserver # NOQA
14
15 PORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000
16 SERVER = len(sys.argv) == 3 and sys.argv[2] or ""
17 SUFFIXES = ['', '.html', '/index.html']
18
19
20 class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):
21 def do_GET(self):
22 # Try to detect file by applying various suffixes
23 for suffix in SUFFIXES:
24 if not hasattr(self, 'original_path'):
25 self.original_path = self.path
26
27 self.path = self.original_path + suffix
28 path = self.translate_path(self.path)
29
30 if os.path.exists(path):
31 srvmod.SimpleHTTPRequestHandler.do_GET(self)
32 logging.info("Found `%s`." % self.path)
33 break
34
35 logging.info("Tried to find `%s`, but it doesn't exist.",
36 self.path)
37 else:
38 # Fallback if there were no matches
39 logging.warning("Unable to find `%s` or variations.",
40 self.original_path)
41
42 Handler = ComplexHTTPRequestHandler
43
44 socketserver.TCPServer.allow_reuse_address = True
45 try:
46 httpd = socketserver.TCPServer((SERVER, PORT), Handler)
47 except OSError as e:
48 logging.error("Could not listen on port %s, server %s.", PORT, SERVER)
49 sys.exit(getattr(e, 'exitcode', 1))
50
51
52 logging.info("Serving at port %s, server %s.", PORT, SERVER)
53 try:
54 httpd.serve_forever()
55 except KeyboardInterrupt as e:
56 logging.info("Shutting down server.")
57 httpd.socket.close()
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pelican/server.py b/pelican/server.py
--- a/pelican/server.py
+++ b/pelican/server.py
@@ -12,6 +12,11 @@
except ImportError:
import socketserver # NOQA
+try:
+ from magic import from_file as magic_from_file
+except ImportError:
+ magic_from_file = None
+
PORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000
SERVER = len(sys.argv) == 3 and sys.argv[2] or ""
SUFFIXES = ['', '.html', '/index.html']
@@ -39,6 +44,18 @@
logging.warning("Unable to find `%s` or variations.",
self.original_path)
+ def guess_type(self, path):
+ """Guess at the mime type for the specified file.
+ """
+ mimetype = srvmod.SimpleHTTPRequestHandler.guess_type(self, path)
+
+ # If the default guess is too generic, try the python-magic library
+ if mimetype == 'application/octet-stream' and magic_from_file:
+ mimetype = magic_from_file(path, mime=True)
+
+ return mimetype
+
+
Handler = ComplexHTTPRequestHandler
socketserver.TCPServer.allow_reuse_address = True
| {"golden_diff": "diff --git a/pelican/server.py b/pelican/server.py\n--- a/pelican/server.py\n+++ b/pelican/server.py\n@@ -12,6 +12,11 @@\n except ImportError:\n import socketserver # NOQA\n \n+try:\n+ from magic import from_file as magic_from_file\n+except ImportError:\n+ magic_from_file = None\n+\n PORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000\n SERVER = len(sys.argv) == 3 and sys.argv[2] or \"\"\n SUFFIXES = ['', '.html', '/index.html']\n@@ -39,6 +44,18 @@\n logging.warning(\"Unable to find `%s` or variations.\",\n self.original_path)\n \n+ def guess_type(self, path):\n+ \"\"\"Guess at the mime type for the specified file.\n+ \"\"\"\n+ mimetype = srvmod.SimpleHTTPRequestHandler.guess_type(self, path)\n+\n+ # If the default guess is too generic, try the python-magic library\n+ if mimetype == 'application/octet-stream' and magic_from_file:\n+ mimetype = magic_from_file(path, mime=True)\n+\n+ return mimetype\n+\n+\n Handler = ComplexHTTPRequestHandler\n \n socketserver.TCPServer.allow_reuse_address = True\n", "issue": "Built-in server relies on file name suffix to guess Content-Type\nPelican's built-in web server relies on the standard `SimpleHTTPServer` module to guess the appropriate `Content-Type` header for the files it serves. Sadly, that implementation relies on file name suffix to make its guesses. When I configure my site to use URLs without suffixes...\n\n```\n'PAGE_URL': 'pages/{slug}'\n```\n\n...the server sends `Content-Type: application/octet-stream`, and my browser refuses to render the HTML.\n\nThis could be better, at least on systems that have the python-magic package installed.\n\n", "before_files": [{"content": "from __future__ import print_function\nimport os\nimport sys\nimport logging\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept ImportError:\n import socketserver # NOQA\n\nPORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000\nSERVER = len(sys.argv) == 3 and sys.argv[2] or \"\"\nSUFFIXES = ['', '.html', '/index.html']\n\n\nclass ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n def do_GET(self):\n # Try to detect file by applying various suffixes\n for suffix in SUFFIXES:\n if not hasattr(self, 'original_path'):\n self.original_path = self.path\n\n self.path = self.original_path + suffix\n path = self.translate_path(self.path)\n\n if os.path.exists(path):\n srvmod.SimpleHTTPRequestHandler.do_GET(self)\n logging.info(\"Found `%s`.\" % self.path)\n break\n\n logging.info(\"Tried to find `%s`, but it doesn't exist.\",\n self.path)\n else:\n # Fallback if there were no matches\n logging.warning(\"Unable to find `%s` or variations.\",\n self.original_path)\n\nHandler = ComplexHTTPRequestHandler\n\nsocketserver.TCPServer.allow_reuse_address = True\ntry:\n httpd = socketserver.TCPServer((SERVER, PORT), Handler)\nexcept OSError as e:\n logging.error(\"Could not listen on port %s, server %s.\", PORT, SERVER)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nlogging.info(\"Serving at port %s, server %s.\", PORT, SERVER)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n logging.info(\"Shutting down server.\")\n httpd.socket.close()\n", "path": "pelican/server.py"}], "after_files": [{"content": "from __future__ import print_function\nimport os\nimport sys\nimport logging\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept 
ImportError:\n import socketserver # NOQA\n\ntry:\n from magic import from_file as magic_from_file\nexcept ImportError:\n magic_from_file = None\n\nPORT = len(sys.argv) in (2, 3) and int(sys.argv[1]) or 8000\nSERVER = len(sys.argv) == 3 and sys.argv[2] or \"\"\nSUFFIXES = ['', '.html', '/index.html']\n\n\nclass ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n def do_GET(self):\n # Try to detect file by applying various suffixes\n for suffix in SUFFIXES:\n if not hasattr(self, 'original_path'):\n self.original_path = self.path\n\n self.path = self.original_path + suffix\n path = self.translate_path(self.path)\n\n if os.path.exists(path):\n srvmod.SimpleHTTPRequestHandler.do_GET(self)\n logging.info(\"Found `%s`.\" % self.path)\n break\n\n logging.info(\"Tried to find `%s`, but it doesn't exist.\",\n self.path)\n else:\n # Fallback if there were no matches\n logging.warning(\"Unable to find `%s` or variations.\",\n self.original_path)\n\n def guess_type(self, path):\n \"\"\"Guess at the mime type for the specified file.\n \"\"\"\n mimetype = srvmod.SimpleHTTPRequestHandler.guess_type(self, path)\n\n # If the default guess is too generic, try the python-magic library\n if mimetype == 'application/octet-stream' and magic_from_file:\n mimetype = magic_from_file(path, mime=True)\n\n return mimetype\n\n\nHandler = ComplexHTTPRequestHandler\n\nsocketserver.TCPServer.allow_reuse_address = True\ntry:\n httpd = socketserver.TCPServer((SERVER, PORT), Handler)\nexcept OSError as e:\n logging.error(\"Could not listen on port %s, server %s.\", PORT, SERVER)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nlogging.info(\"Serving at port %s, server %s.\", PORT, SERVER)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n logging.info(\"Shutting down server.\")\n httpd.socket.close()\n", "path": "pelican/server.py"}]} | 904 | 287 |
gh_patches_debug_56716 | rasdani/github-patches | git_diff | mosaicml__composer-182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add venv into docker image to enable editable `pip install`
When trying to install composer with `pip install -e .` from within the docker image, we are seeing this error:
```
Traceback (most recent call last):
File "/usr/bin/composer", line 33, in <module>
sys.exit(load_entry_point('mosaicml', 'console_scripts', 'composer')())
File "/usr/bin/composer", line 22, in importlib_load_entry_point
for entry_point in distribution(dist_name).entry_points
File "/usr/lib/python3.8/importlib/metadata.py", line 445, in distribution
return Distribution.from_name(distribution_name)
File "/usr/lib/python3.8/importlib/metadata.py", line 169, in from_name
raise PackageNotFoundError(name)
importlib.metadata.PackageNotFoundError: mosaicml
```
This seems to be remedied by running the `pip install` from within a virtualenv. Can we bake a virtualenv into the docker image as a workaround?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 import os
4 import sys
5 import textwrap
6
7 import setuptools
8 from setuptools import setup
9
10
11 def package_files(directory: str):
12 # from https://stackoverflow.com/a/36693250
13 paths = []
14 for (path, _, filenames) in os.walk(directory):
15 for filename in filenames:
16 paths.append(os.path.join('..', path, filename))
17 return paths
18
19
20 with open("README.md", "r", encoding="utf-8") as fh:
21 long_description = fh.read()
22
23 install_requires = [
24 "pyyaml>=5.4.1",
25 "tqdm>=4.62.3",
26 "torchmetrics>=0.6.0",
27 "torch_optimizer==0.1.0",
28 "torchvision>=0.9.0",
29 "torch>=1.9",
30 "yahp>=0.0.14",
31 "numpy==1.21.5",
32 ]
33 extra_deps = {}
34
35 extra_deps['base'] = []
36
37 extra_deps['dev'] = [
38 "custom_inherit==2.3.2",
39 'junitparser>=2.1.1',
40 'coverage[toml]>=6.1.1',
41 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners
42 'pytest>=6.2.0',
43 'yapf>=0.13.0',
44 'isort>=5.9.3',
45 'ipython>=7.29.0',
46 'ipykernel>=6.5.0',
47 'jupyter>=1.0.0',
48 'yamllint>=1.26.2',
49 'pytest-timeout>=1.4.2',
50 'recommonmark>=0.7.1',
51 'sphinx>=4.2.0',
52 'sphinx_copybutton>=0.4.0',
53 'sphinx_markdown_tables>=0.0.15',
54 'sphinx-argparse>=0.3.1',
55 'sphinxcontrib.katex>=0.8.6',
56 'sphinxext.opengraph>=0.4.2',
57 'sphinxemoji>=0.2.0',
58 'sphinx_rtd_theme>=1.0.0',
59 'testbook>=0.4.2',
60 'myst-parser>=0.15.2',
61 ]
62 extra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']
63
64 extra_deps['nlp'] = [
65 'transformers>=4.11.3',
66 'datasets>=1.14.0',
67 ]
68
69 extra_deps['unet'] = [
70 'monai>=0.7.0',
71 'scikit-learn>=1.0.1',
72 ]
73
74 extra_deps['deepspeed'] = [
75 'deepspeed>=0.5.5',
76 ]
77
78 extra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)
79
80 setup(
81 name="mosaicml",
82 version="0.3.1",
83 author="MosaicML",
84 author_email="[email protected]",
85 description="composing methods for ML training efficiency",
86 long_description=long_description,
87 long_description_content_type="text/markdown",
88 url="https://github.com/mosaicml/composer",
89 include_package_data=True,
90 package_data={
91 "composer": ['py.typed'],
92 "": package_files('composer/yamls'),
93 },
94 packages=setuptools.find_packages(exclude=["tests*"]),
95 classifiers=[
96 "Programming Language :: Python :: 3",
97 ],
98 install_requires=install_requires,
99 entry_points={
100 'console_scripts': ['composer = composer.cli.launcher:main',],
101 },
102 extras_require=extra_deps,
103 dependency_links=['https://developer.download.nvidia.com/compute/redist'],
104 python_requires='>=3.7',
105 ext_package="composer",
106 )
107
108 # only visible if user installs with verbose -v flag
109 # Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)
110 print("*" * 20, file=sys.stderr)
111 print(textwrap.dedent("""NOTE: For best performance, we recommend installing Pillow-SIMD
112 for accelerated image processing operations. To install:
113 \t pip uninstall pillow && pip install pillow-simd"""),
114 file=sys.stderr)
115 print("*" * 20, file=sys.stderr)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,16 @@
# Copyright 2021 MosaicML. All Rights Reserved.
import os
+import site
import sys
import textwrap
import setuptools
from setuptools import setup
+# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255
+site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
+
def package_files(directory: str):
# from https://stackoverflow.com/a/36693250
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,12 +1,16 @@\n # Copyright 2021 MosaicML. All Rights Reserved.\n \n import os\n+import site\n import sys\n import textwrap\n \n import setuptools\n from setuptools import setup\n \n+# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\n+site.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n+\n \n def package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n", "issue": "Add venv into docker image to enable editable `pip install`\nWhen trying to install composer with `pip install -e .` from within the docker image, we are seeing this error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/bin/composer\", line 33, in <module>\r\n sys.exit(load_entry_point('mosaicml', 'console_scripts', 'composer')())\r\n File \"/usr/bin/composer\", line 22, in importlib_load_entry_point\r\n for entry_point in distribution(dist_name).entry_points\r\n File \"/usr/lib/python3.8/importlib/metadata.py\", line 445, in distribution\r\n return Distribution.from_name(distribution_name)\r\n File \"/usr/lib/python3.8/importlib/metadata.py\", line 169, in from_name\r\n raise PackageNotFoundError(name)\r\nimportlib.metadata.PackageNotFoundError: mosaicml\r\n```\r\nThis seems to be remedied by running the `pip install` from within a virtualenv. Can we bake a virtualenv into the docker image as a workaround?\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"numpy==1.21.5\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['deepspeed'] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n 
package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport site\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools import setup\n\n# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\n\ndef package_files(directory: str):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.6.0\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"yahp>=0.0.14\",\n \"numpy==1.21.5\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n \"custom_inherit==2.3.2\",\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'fasteners>=0.16.3', # run_directory_uploader tests require fasteners\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'ipython>=7.29.0',\n 'ipykernel>=6.5.0',\n 'jupyter>=1.0.0',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinxemoji>=0.2.0',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['logging'] = ['wandb>=0.12.2', 'apache-libcloud>=3.4.1']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['deepspeed'] = [\n 'deepspeed>=0.5.5',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.3.1\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n 
},\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(textwrap.dedent(\"\"\"NOTE: For best performance, we recommend installing Pillow-SIMD\n for accelerated image processing operations. To install:\n \\t pip uninstall pillow && pip install pillow-simd\"\"\"),\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n", "path": "setup.py"}]} | 1,712 | 144 |
gh_patches_debug_15609 | rasdani/github-patches | git_diff | tensorflow__addons-2355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compile with AVX only
It seems that TF 2.4.0 is accidentally compiled with AVX2 (or more; it is not clear what the CPU spec is on the TF release CI), and we followed it in https://github.com/tensorflow/addons/pull/2299. We should fall back to a subset of ISAs, probably AVX, once there is a new release.
https://github.com/tensorflow/tensorflow/pull/46229
/cc @seanpmorgan
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `configure.py`
Content:
```
1 # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 # Usage: python configure.py
16 #
17
18
19 import os
20 import pathlib
21 import platform
22 import logging
23
24 import tensorflow as tf
25
26 _TFA_BAZELRC = ".bazelrc"
27
28
29 # Writes variables to bazelrc file
30 def write(line):
31 with open(_TFA_BAZELRC, "a") as f:
32 f.write(line + "\n")
33
34
35 def write_action_env(var_name, var):
36 write('build --action_env {}="{}"'.format(var_name, var))
37
38
39 def is_macos():
40 return platform.system() == "Darwin"
41
42
43 def is_windows():
44 return platform.system() == "Windows"
45
46
47 def is_linux():
48 return platform.system() == "Linux"
49
50
51 def is_raspi_arm():
52 return os.uname()[4] == "armv7l"
53
54
55 def get_tf_header_dir():
56 import tensorflow as tf
57
58 tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]
59 if is_windows():
60 tf_header_dir = tf_header_dir.replace("\\", "/")
61 return tf_header_dir
62
63
64 def get_tf_shared_lib_dir():
65 import tensorflow as tf
66
67 # OS Specific parsing
68 if is_windows():
69 tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
70 return tf_shared_lib_dir.replace("\\", "/")
71 elif is_raspi_arm():
72 return tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
73 else:
74 return tf.sysconfig.get_link_flags()[0][2:]
75
76
77 # Converts the linkflag namespec to the full shared library name
78 def get_shared_lib_name():
79 import tensorflow as tf
80
81 namespec = tf.sysconfig.get_link_flags()
82 if is_macos():
83 # MacOS
84 return "lib" + namespec[1][2:] + ".dylib"
85 elif is_windows():
86 # Windows
87 return "_pywrap_tensorflow_internal.lib"
88 elif is_raspi_arm():
89 # The below command for linux would return an empty list
90 return "_pywrap_tensorflow_internal.so"
91 else:
92 # Linux
93 return namespec[1][3:]
94
95
96 def create_build_configuration():
97 print()
98 print("Configuring TensorFlow Addons to be built from source...")
99
100 if os.path.isfile(_TFA_BAZELRC):
101 os.remove(_TFA_BAZELRC)
102
103 logging.disable(logging.WARNING)
104
105 write_action_env("TF_HEADER_DIR", get_tf_header_dir())
106 write_action_env("TF_SHARED_LIBRARY_DIR", get_tf_shared_lib_dir())
107 write_action_env("TF_SHARED_LIBRARY_NAME", get_shared_lib_name())
108 write_action_env("TF_CXX11_ABI_FLAG", tf.sysconfig.CXX11_ABI_FLAG)
109
110 write("build --spawn_strategy=standalone")
111 write("build --strategy=Genrule=standalone")
112 write("build -c opt")
113
114 if is_windows():
115 write("build --config=windows")
116 write("build:windows --enable_runfiles")
117 write("build:windows --copt=/experimental:preprocessor")
118 write("build:windows --host_copt=/experimental:preprocessor")
119 write("build:windows --copt=/arch=AVX2")
120 write("build:windows --cxxopt=/std:c++14")
121 write("build:windows --host_cxxopt=/std:c++14")
122
123 if is_macos() or is_linux():
124 write("build --copt=-mavx2")
125 write("build --cxxopt=-std=c++14")
126 write("build --host_cxxopt=-std=c++14")
127
128 if os.getenv("TF_NEED_CUDA", "0") == "1":
129 print("> Building GPU & CPU ops")
130 configure_cuda()
131 else:
132 print("> Building only CPU ops")
133
134 print()
135 print("Build configurations successfully written to", _TFA_BAZELRC, ":\n")
136 print(pathlib.Path(_TFA_BAZELRC).read_text())
137
138
139 def configure_cuda():
140 write_action_env("TF_NEED_CUDA", "1")
141 write_action_env(
142 "CUDA_TOOLKIT_PATH", os.getenv("CUDA_TOOLKIT_PATH", "/usr/local/cuda")
143 )
144 write_action_env(
145 "CUDNN_INSTALL_PATH",
146 os.getenv("CUDNN_INSTALL_PATH", "/usr/lib/x86_64-linux-gnu"),
147 )
148 write_action_env("TF_CUDA_VERSION", os.getenv("TF_CUDA_VERSION", "11"))
149 write_action_env("TF_CUDNN_VERSION", os.getenv("TF_CUDNN_VERSION", "8"))
150
151 write("test --config=cuda")
152 write("build --config=cuda")
153 write("build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true")
154 write("build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain")
155
156
157 if __name__ == "__main__":
158 create_build_configuration()
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/configure.py b/configure.py
--- a/configure.py
+++ b/configure.py
@@ -116,12 +116,12 @@
write("build:windows --enable_runfiles")
write("build:windows --copt=/experimental:preprocessor")
write("build:windows --host_copt=/experimental:preprocessor")
- write("build:windows --copt=/arch=AVX2")
+ write("build:windows --copt=/arch=AVX")
write("build:windows --cxxopt=/std:c++14")
write("build:windows --host_cxxopt=/std:c++14")
if is_macos() or is_linux():
- write("build --copt=-mavx2")
+ write("build --copt=-mavx")
write("build --cxxopt=-std=c++14")
write("build --host_cxxopt=-std=c++14")
| {"golden_diff": "diff --git a/configure.py b/configure.py\n--- a/configure.py\n+++ b/configure.py\n@@ -116,12 +116,12 @@\n write(\"build:windows --enable_runfiles\")\n write(\"build:windows --copt=/experimental:preprocessor\")\n write(\"build:windows --host_copt=/experimental:preprocessor\")\n- write(\"build:windows --copt=/arch=AVX2\")\n+ write(\"build:windows --copt=/arch=AVX\")\n write(\"build:windows --cxxopt=/std:c++14\")\n write(\"build:windows --host_cxxopt=/std:c++14\")\n \n if is_macos() or is_linux():\n- write(\"build --copt=-mavx2\")\n+ write(\"build --copt=-mavx\")\n write(\"build --cxxopt=-std=c++14\")\n write(\"build --host_cxxopt=-std=c++14\")\n", "issue": "Compile with AVX only\nSeems that TF2.4.0 is accidentally compiled with AVX2 (or more, not sure what's the CPU spec on TF release CI), and we follow it in https://github.com/tensorflow/addons/pull/2299. We should fallback to subset of ISAs, probably AVX, once there is a new release.\r\n\r\nhttps://github.com/tensorflow/tensorflow/pull/46229\r\n\r\n/cc @seanpmorgan \n", "before_files": [{"content": "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Usage: python configure.py\n#\n\n\nimport os\nimport pathlib\nimport platform\nimport logging\n\nimport tensorflow as tf\n\n_TFA_BAZELRC = \".bazelrc\"\n\n\n# Writes variables to bazelrc file\ndef write(line):\n with open(_TFA_BAZELRC, \"a\") as f:\n f.write(line + \"\\n\")\n\n\ndef write_action_env(var_name, var):\n write('build --action_env {}=\"{}\"'.format(var_name, var))\n\n\ndef is_macos():\n return platform.system() == \"Darwin\"\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef is_linux():\n return platform.system() == \"Linux\"\n\n\ndef is_raspi_arm():\n return os.uname()[4] == \"armv7l\"\n\n\ndef get_tf_header_dir():\n import tensorflow as tf\n\n tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]\n if is_windows():\n tf_header_dir = tf_header_dir.replace(\"\\\\\", \"/\")\n return tf_header_dir\n\n\ndef get_tf_shared_lib_dir():\n import tensorflow as tf\n\n # OS Specific parsing\n if is_windows():\n tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + \"python\"\n return tf_shared_lib_dir.replace(\"\\\\\", \"/\")\n elif is_raspi_arm():\n return tf.sysconfig.get_compile_flags()[0][2:-7] + \"python\"\n else:\n return tf.sysconfig.get_link_flags()[0][2:]\n\n\n# Converts the linkflag namespec to the full shared library name\ndef get_shared_lib_name():\n import tensorflow as tf\n\n namespec = tf.sysconfig.get_link_flags()\n if is_macos():\n # MacOS\n return \"lib\" + namespec[1][2:] + \".dylib\"\n elif is_windows():\n # Windows\n return \"_pywrap_tensorflow_internal.lib\"\n elif is_raspi_arm():\n # The below command for linux would return an empty list\n return \"_pywrap_tensorflow_internal.so\"\n else:\n # Linux\n return namespec[1][3:]\n\n\ndef create_build_configuration():\n print()\n 
print(\"Configuring TensorFlow Addons to be built from source...\")\n\n if os.path.isfile(_TFA_BAZELRC):\n os.remove(_TFA_BAZELRC)\n\n logging.disable(logging.WARNING)\n\n write_action_env(\"TF_HEADER_DIR\", get_tf_header_dir())\n write_action_env(\"TF_SHARED_LIBRARY_DIR\", get_tf_shared_lib_dir())\n write_action_env(\"TF_SHARED_LIBRARY_NAME\", get_shared_lib_name())\n write_action_env(\"TF_CXX11_ABI_FLAG\", tf.sysconfig.CXX11_ABI_FLAG)\n\n write(\"build --spawn_strategy=standalone\")\n write(\"build --strategy=Genrule=standalone\")\n write(\"build -c opt\")\n\n if is_windows():\n write(\"build --config=windows\")\n write(\"build:windows --enable_runfiles\")\n write(\"build:windows --copt=/experimental:preprocessor\")\n write(\"build:windows --host_copt=/experimental:preprocessor\")\n write(\"build:windows --copt=/arch=AVX2\")\n write(\"build:windows --cxxopt=/std:c++14\")\n write(\"build:windows --host_cxxopt=/std:c++14\")\n\n if is_macos() or is_linux():\n write(\"build --copt=-mavx2\")\n write(\"build --cxxopt=-std=c++14\")\n write(\"build --host_cxxopt=-std=c++14\")\n\n if os.getenv(\"TF_NEED_CUDA\", \"0\") == \"1\":\n print(\"> Building GPU & CPU ops\")\n configure_cuda()\n else:\n print(\"> Building only CPU ops\")\n\n print()\n print(\"Build configurations successfully written to\", _TFA_BAZELRC, \":\\n\")\n print(pathlib.Path(_TFA_BAZELRC).read_text())\n\n\ndef configure_cuda():\n write_action_env(\"TF_NEED_CUDA\", \"1\")\n write_action_env(\n \"CUDA_TOOLKIT_PATH\", os.getenv(\"CUDA_TOOLKIT_PATH\", \"/usr/local/cuda\")\n )\n write_action_env(\n \"CUDNN_INSTALL_PATH\",\n os.getenv(\"CUDNN_INSTALL_PATH\", \"/usr/lib/x86_64-linux-gnu\"),\n )\n write_action_env(\"TF_CUDA_VERSION\", os.getenv(\"TF_CUDA_VERSION\", \"11\"))\n write_action_env(\"TF_CUDNN_VERSION\", os.getenv(\"TF_CUDNN_VERSION\", \"8\"))\n\n write(\"test --config=cuda\")\n write(\"build --config=cuda\")\n write(\"build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true\")\n write(\"build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain\")\n\n\nif __name__ == \"__main__\":\n create_build_configuration()\n", "path": "configure.py"}], "after_files": [{"content": "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Usage: python configure.py\n#\n\n\nimport os\nimport pathlib\nimport platform\nimport logging\n\nimport tensorflow as tf\n\n_TFA_BAZELRC = \".bazelrc\"\n\n\n# Writes variables to bazelrc file\ndef write(line):\n with open(_TFA_BAZELRC, \"a\") as f:\n f.write(line + \"\\n\")\n\n\ndef write_action_env(var_name, var):\n write('build --action_env {}=\"{}\"'.format(var_name, var))\n\n\ndef is_macos():\n return platform.system() == \"Darwin\"\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef is_linux():\n return platform.system() == \"Linux\"\n\n\ndef is_raspi_arm():\n return os.uname()[4] == \"armv7l\"\n\n\ndef get_tf_header_dir():\n import tensorflow as tf\n\n tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]\n if is_windows():\n tf_header_dir = tf_header_dir.replace(\"\\\\\", \"/\")\n return tf_header_dir\n\n\ndef get_tf_shared_lib_dir():\n import tensorflow as tf\n\n # OS Specific parsing\n if is_windows():\n tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + \"python\"\n return tf_shared_lib_dir.replace(\"\\\\\", \"/\")\n elif is_raspi_arm():\n return tf.sysconfig.get_compile_flags()[0][2:-7] + \"python\"\n else:\n return tf.sysconfig.get_link_flags()[0][2:]\n\n\n# Converts the linkflag namespec to the full shared library name\ndef get_shared_lib_name():\n import tensorflow as tf\n\n namespec = tf.sysconfig.get_link_flags()\n if is_macos():\n # MacOS\n return \"lib\" + namespec[1][2:] + \".dylib\"\n elif is_windows():\n # Windows\n return \"_pywrap_tensorflow_internal.lib\"\n elif is_raspi_arm():\n # The below command for linux would return an empty list\n return \"_pywrap_tensorflow_internal.so\"\n else:\n # Linux\n return namespec[1][3:]\n\n\ndef create_build_configuration():\n print()\n print(\"Configuring TensorFlow Addons to be built from source...\")\n\n if os.path.isfile(_TFA_BAZELRC):\n os.remove(_TFA_BAZELRC)\n\n logging.disable(logging.WARNING)\n\n write_action_env(\"TF_HEADER_DIR\", get_tf_header_dir())\n write_action_env(\"TF_SHARED_LIBRARY_DIR\", get_tf_shared_lib_dir())\n write_action_env(\"TF_SHARED_LIBRARY_NAME\", get_shared_lib_name())\n write_action_env(\"TF_CXX11_ABI_FLAG\", tf.sysconfig.CXX11_ABI_FLAG)\n\n write(\"build --spawn_strategy=standalone\")\n write(\"build --strategy=Genrule=standalone\")\n write(\"build -c opt\")\n\n if is_windows():\n write(\"build --config=windows\")\n write(\"build:windows --enable_runfiles\")\n write(\"build:windows --copt=/experimental:preprocessor\")\n write(\"build:windows --host_copt=/experimental:preprocessor\")\n write(\"build:windows --copt=/arch=AVX\")\n write(\"build:windows --cxxopt=/std:c++14\")\n write(\"build:windows --host_cxxopt=/std:c++14\")\n\n if is_macos() or is_linux():\n write(\"build --copt=-mavx\")\n write(\"build --cxxopt=-std=c++14\")\n write(\"build --host_cxxopt=-std=c++14\")\n\n if os.getenv(\"TF_NEED_CUDA\", \"0\") == 
\"1\":\n print(\"> Building GPU & CPU ops\")\n configure_cuda()\n else:\n print(\"> Building only CPU ops\")\n\n print()\n print(\"Build configurations successfully written to\", _TFA_BAZELRC, \":\\n\")\n print(pathlib.Path(_TFA_BAZELRC).read_text())\n\n\ndef configure_cuda():\n write_action_env(\"TF_NEED_CUDA\", \"1\")\n write_action_env(\n \"CUDA_TOOLKIT_PATH\", os.getenv(\"CUDA_TOOLKIT_PATH\", \"/usr/local/cuda\")\n )\n write_action_env(\n \"CUDNN_INSTALL_PATH\",\n os.getenv(\"CUDNN_INSTALL_PATH\", \"/usr/lib/x86_64-linux-gnu\"),\n )\n write_action_env(\"TF_CUDA_VERSION\", os.getenv(\"TF_CUDA_VERSION\", \"11\"))\n write_action_env(\"TF_CUDNN_VERSION\", os.getenv(\"TF_CUDNN_VERSION\", \"8\"))\n\n write(\"test --config=cuda\")\n write(\"build --config=cuda\")\n write(\"build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true\")\n write(\"build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain\")\n\n\nif __name__ == \"__main__\":\n create_build_configuration()\n", "path": "configure.py"}]} | 1,951 | 217 |
gh_patches_debug_56595 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-3045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The virsh_list_all parser is raising ValueError exceptions in production
The VirshListAll parser is throwing a large number of ValueError("Line containing 'Id,Name,State' was not found in table",) exceptions in production.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/virsh_list_all.py`
Content:
```
1 """VirshListAll - command ``virsh --readonly list --all``
2 =========================================================
3
4 This module provides VM status using output of command ``virsh --readonly list --all``.
5 """
6 from collections import namedtuple
7
8 from insights.specs import Specs
9 from .. import CommandParser, parser
10 from . import parse_fixed_table, keyword_search
11
12
13 @parser(Specs.virsh_list_all)
14 class VirshListAll(CommandParser):
15 """Parsing output of ``virsh --readonly list --all``.
16
17 Typical output of ``virsh --readonly list --all`` command is::
18
19 Id Name State
20 ----------------------------------------------------
21 2 rhel7.4 running
22 4 rhel7.0 paused
23 - centos6.8-router shut off
24 - cfme-5.7.13 shut off
25 - cfme-rhos-5.9.0.15 shut off
26 - fedora-24-kernel shut off
27 - fedora-saio_fedoraSaio shut off
28 - fedora24-misc shut off
29 - freebsd11.0 shut off
30 - guixSD shut off
31 - miq-gap-1 shut off
32 - rhel7.2 shut off
33 - RHOSP10 shut off
34
35
36 Examples:
37
38 >>> len(output.search(state='shut off')) == 11
39 True
40 >>> len(output.search(id=None)) == 11
41 True
42 >>> len(output.search(id=2)) == 1
43 True
44 >>> output.search(name='rhel7.4') == [{'state': 'running', 'id': 2, 'name': 'rhel7.4'}]
45 True
46 >>> output.get_vm_state('rhel7.0') == 'paused'
47 True
48 >>> output.get_vm_state('rhel9.0') is None
49 True
50 >>> 'cfme' in output
51 False
52 >>> 'cfme-5.7.13' in output
53 True
54
55 Attributes:
56 fields (list): List of ``KeyValue`` namedtupules for each line
57 in the command.
58
59 cols (list): List id key value pair derived from the command.
60
61 keywords (list): keywords present in the command, each
62 keyword is converted to lowercase.
63
64 """
65 keyvalue = namedtuple('KeyValue',
66 ['name', 'state', 'id', 'name_lower'])
67 """namedtuple: Represent name value pair as a namedtuple with case."""
68 def _cleanup(self):
69 for col in self.cols:
70 if col['id'] == '-':
71 col['id'] = None
72 else:
73 col['id'] = (lambda x: int(x) if x.isdigit() else x)(col['id'])
74
75 def parse_content(self, content):
76 self.fields = []
77 self.cols = []
78 self.keywords = []
79 if not content:
80 return
81
82 self.cols = parse_fixed_table(content,
83 heading_ignore=['Id', 'Name', 'State'],
84 header_substitute=[('Id', 'id'), ('Name', 'name'), ('State', 'state')])[1:] # noqa
85 self._cleanup()
86
87 for item in self.cols:
88 self.fields.append(self.keyvalue(item['name'], item['state'], item['id'], item['name'].lower())) # noqa
89 self.keywords = [name.name_lower for name in self.fields]
90
91 def __contains__(self, keyword):
92 return keyword.lower() in self.keywords
93
94 def __iter__(self):
95 return iter(self.fields)
96
97 def search(self, **kw):
98 '''Search item based on key value pair.
99
100 Example:
101
102 >>> len(output.search(state='shut off')) == 11
103 True
104 >>> len(output.search(id=None)) == 11
105 True
106 >>> len(output.search(id=2)) == 1
107 True
108 '''
109 return keyword_search(self.cols, **kw)
110
111 def get_vm_state(self, vmname):
112 '''Get VM state associated with vmname
113
114 Typical output is ``virsh --readonly list --all`` command::
115
116 Id Name State
117 ----------------------------------------------------
118 2 rhel7.4 running
119 4 rhel7.0 paused
120
121
122 Example:
123
124 >>> output.get_vm_state('rhel7.0')
125 'paused'
126
127 Args:
128
129 vmname (str): A key. For ex. ``rhel7.0``.
130
131 Returns:
132
133 str: State of VM. Returns None if, ``vmname`` does not exist.
134 '''
135 if vmname.lower() in self.keywords:
136 return self.search(name=vmname)[0]['state']
137 return None
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/parsers/virsh_list_all.py b/insights/parsers/virsh_list_all.py
--- a/insights/parsers/virsh_list_all.py
+++ b/insights/parsers/virsh_list_all.py
@@ -76,6 +76,10 @@
self.fields = []
self.cols = []
self.keywords = []
+ # Check and remove any error message, or empty lines. This to
+ # prevent any ValueError exceptions when parse_fixed_table is
+ # called below.
+ content = [l for l in content if not l.startswith("error: ") and l != ""]
if not content:
return
| {"golden_diff": "diff --git a/insights/parsers/virsh_list_all.py b/insights/parsers/virsh_list_all.py\n--- a/insights/parsers/virsh_list_all.py\n+++ b/insights/parsers/virsh_list_all.py\n@@ -76,6 +76,10 @@\n self.fields = []\n self.cols = []\n self.keywords = []\n+ # Check and remove any error message, or empty lines. This to\n+ # prevent any ValueError exceptions when parse_fixed_table is\n+ # called below.\n+ content = [l for l in content if not l.startswith(\"error: \") and l != \"\"]\n if not content:\n return\n", "issue": "The virsh_list_all parser is raising ValueError exceptions in production\nThe VirshListAll parser is throwing a large number of the exception ValueError(\"Line containing 'Id,Name,State' was not found in table\",) in production.\n", "before_files": [{"content": "\"\"\"VirshListAll - command ``virsh --readonly list --all``\n=========================================================\n\nThis module provides VM status using output of command ``virsh --readonly list --all``.\n\"\"\"\nfrom collections import namedtuple\n\nfrom insights.specs import Specs\nfrom .. import CommandParser, parser\nfrom . import parse_fixed_table, keyword_search\n\n\n@parser(Specs.virsh_list_all)\nclass VirshListAll(CommandParser):\n \"\"\"Parsing output of ``virsh --readonly list --all``.\n\n Typical output of ``virsh --readonly list --all`` command is::\n\n Id Name State\n ----------------------------------------------------\n 2 rhel7.4 running\n 4 rhel7.0 paused\n - centos6.8-router shut off\n - cfme-5.7.13 shut off\n - cfme-rhos-5.9.0.15 shut off\n - fedora-24-kernel shut off\n - fedora-saio_fedoraSaio shut off\n - fedora24-misc shut off\n - freebsd11.0 shut off\n - guixSD shut off\n - miq-gap-1 shut off\n - rhel7.2 shut off\n - RHOSP10 shut off\n\n\n Examples:\n\n >>> len(output.search(state='shut off')) == 11\n True\n >>> len(output.search(id=None)) == 11\n True\n >>> len(output.search(id=2)) == 1\n True\n >>> output.search(name='rhel7.4') == [{'state': 'running', 'id': 2, 'name': 'rhel7.4'}]\n True\n >>> output.get_vm_state('rhel7.0') == 'paused'\n True\n >>> output.get_vm_state('rhel9.0') is None\n True\n >>> 'cfme' in output\n False\n >>> 'cfme-5.7.13' in output\n True\n\n Attributes:\n fields (list): List of ``KeyValue`` namedtupules for each line\n in the command.\n\n cols (list): List id key value pair derived from the command.\n\n keywords (list): keywords present in the command, each\n keyword is converted to lowercase.\n\n \"\"\"\n keyvalue = namedtuple('KeyValue',\n ['name', 'state', 'id', 'name_lower'])\n \"\"\"namedtuple: Represent name value pair as a namedtuple with case.\"\"\"\n def _cleanup(self):\n for col in self.cols:\n if col['id'] == '-':\n col['id'] = None\n else:\n col['id'] = (lambda x: int(x) if x.isdigit() else x)(col['id'])\n\n def parse_content(self, content):\n self.fields = []\n self.cols = []\n self.keywords = []\n if not content:\n return\n\n self.cols = parse_fixed_table(content,\n heading_ignore=['Id', 'Name', 'State'],\n header_substitute=[('Id', 'id'), ('Name', 'name'), ('State', 'state')])[1:] # noqa\n self._cleanup()\n\n for item in self.cols:\n self.fields.append(self.keyvalue(item['name'], item['state'], item['id'], item['name'].lower())) # noqa\n self.keywords = [name.name_lower for name in self.fields]\n\n def __contains__(self, keyword):\n return keyword.lower() in self.keywords\n\n def __iter__(self):\n return iter(self.fields)\n\n def search(self, **kw):\n '''Search item based on key value pair.\n\n Example:\n\n >>> 
len(output.search(state='shut off')) == 11\n True\n >>> len(output.search(id=None)) == 11\n True\n >>> len(output.search(id=2)) == 1\n True\n '''\n return keyword_search(self.cols, **kw)\n\n def get_vm_state(self, vmname):\n '''Get VM state associated with vmname\n\n Typical output is ``virsh --readonly list --all`` command::\n\n Id Name State\n ----------------------------------------------------\n 2 rhel7.4 running\n 4 rhel7.0 paused\n\n\n Example:\n\n >>> output.get_vm_state('rhel7.0')\n 'paused'\n\n Args:\n\n vmname (str): A key. For ex. ``rhel7.0``.\n\n Returns:\n\n str: State of VM. Returns None if, ``vmname`` does not exist.\n '''\n if vmname.lower() in self.keywords:\n return self.search(name=vmname)[0]['state']\n return None\n", "path": "insights/parsers/virsh_list_all.py"}], "after_files": [{"content": "\"\"\"VirshListAll - command ``virsh --readonly list --all``\n=========================================================\n\nThis module provides VM status using output of command ``virsh --readonly list --all``.\n\"\"\"\nfrom collections import namedtuple\n\nfrom insights.specs import Specs\nfrom .. import CommandParser, parser\nfrom . import parse_fixed_table, keyword_search\n\n\n@parser(Specs.virsh_list_all)\nclass VirshListAll(CommandParser):\n \"\"\"Parsing output of ``virsh --readonly list --all``.\n\n Typical output of ``virsh --readonly list --all`` command is::\n\n Id Name State\n ----------------------------------------------------\n 2 rhel7.4 running\n 4 rhel7.0 paused\n - centos6.8-router shut off\n - cfme-5.7.13 shut off\n - cfme-rhos-5.9.0.15 shut off\n - fedora-24-kernel shut off\n - fedora-saio_fedoraSaio shut off\n - fedora24-misc shut off\n - freebsd11.0 shut off\n - guixSD shut off\n - miq-gap-1 shut off\n - rhel7.2 shut off\n - RHOSP10 shut off\n\n\n Examples:\n\n >>> len(output.search(state='shut off')) == 11\n True\n >>> len(output.search(id=None)) == 11\n True\n >>> len(output.search(id=2)) == 1\n True\n >>> output.search(name='rhel7.4') == [{'state': 'running', 'id': 2, 'name': 'rhel7.4'}]\n True\n >>> output.get_vm_state('rhel7.0') == 'paused'\n True\n >>> output.get_vm_state('rhel9.0') is None\n True\n >>> 'cfme' in output\n False\n >>> 'cfme-5.7.13' in output\n True\n\n Attributes:\n fields (list): List of ``KeyValue`` namedtupules for each line\n in the command.\n\n cols (list): List id key value pair derived from the command.\n\n keywords (list): keywords present in the command, each\n keyword is converted to lowercase.\n\n \"\"\"\n keyvalue = namedtuple('KeyValue',\n ['name', 'state', 'id', 'name_lower'])\n \"\"\"namedtuple: Represent name value pair as a namedtuple with case.\"\"\"\n def _cleanup(self):\n for col in self.cols:\n if col['id'] == '-':\n col['id'] = None\n else:\n col['id'] = (lambda x: int(x) if x.isdigit() else x)(col['id'])\n\n def parse_content(self, content):\n self.fields = []\n self.cols = []\n self.keywords = []\n # Check and remove any error message, or empty lines. 
This to\n # prevent any ValueError exceptions when parse_fixed_table is\n # called below.\n content = [l for l in content if not l.startswith(\"error: \") and l != \"\"]\n if not content:\n return\n\n self.cols = parse_fixed_table(content,\n heading_ignore=['Id', 'Name', 'State'],\n header_substitute=[('Id', 'id'), ('Name', 'name'), ('State', 'state')])[1:] # noqa\n self._cleanup()\n\n for item in self.cols:\n self.fields.append(self.keyvalue(item['name'], item['state'], item['id'], item['name'].lower())) # noqa\n self.keywords = [name.name_lower for name in self.fields]\n\n def __contains__(self, keyword):\n return keyword.lower() in self.keywords\n\n def __iter__(self):\n return iter(self.fields)\n\n def search(self, **kw):\n '''Search item based on key value pair.\n\n Example:\n\n >>> len(output.search(state='shut off')) == 11\n True\n >>> len(output.search(id=None)) == 11\n True\n >>> len(output.search(id=2)) == 1\n True\n '''\n return keyword_search(self.cols, **kw)\n\n def get_vm_state(self, vmname):\n '''Get VM state associated with vmname\n\n Typical output is ``virsh --readonly list --all`` command::\n\n Id Name State\n ----------------------------------------------------\n 2 rhel7.4 running\n 4 rhel7.0 paused\n\n\n Example:\n\n >>> output.get_vm_state('rhel7.0')\n 'paused'\n\n Args:\n\n vmname (str): A key. For ex. ``rhel7.0``.\n\n Returns:\n\n str: State of VM. Returns None if, ``vmname`` does not exist.\n '''\n if vmname.lower() in self.keywords:\n return self.search(name=vmname)[0]['state']\n return None\n", "path": "insights/parsers/virsh_list_all.py"}]} | 1,687 | 151 |
gh_patches_debug_15068 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1708 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issue
Hello,
I'm trying to install Geotrek following the documentation, and I have some problems.
At the very beginning, when I run install.sh, the script can't find the `etc/settings.ini` file. I checked, and I have no `etc` folder at all... so the install aborted.
I tried to create this folder and the `settings.ini` file myself, with the expected variables (dbhost, dbname, etc.). It works (the database is installed), but the install crashes a few steps later when it tries to install the Python environment: `Could not setup python environment !`
Did I miss something in the installation documentation?
How can I fix this problem?
Thanks for your help
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/zoning/factories.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import factory
3
4 from django.conf import settings
5 from django.contrib.gis.geos import Polygon, MultiPolygon
6
7 from mapentity.helpers import bbox_split_srid_2154
8
9 from geotrek.core.factories import TopologyFactory
10
11 from . import models
12
13
14 # Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it
15 geom_city_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)
16 geom_district_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
17 geom_area_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
18
19
20 class CityFactory(factory.DjangoModelFactory):
21 class Meta:
22 model = models.City
23
24 code = factory.Sequence(lambda n: u"#%s" % n) # id (!) with max_length=6
25 name = factory.Sequence(lambda n: u"City name %s" % n)
26 geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_city_iter.next()), srid=settings.SRID))
27
28
29 class DistrictFactory(factory.DjangoModelFactory):
30 class Meta:
31 model = models.District
32
33 name = factory.Sequence(lambda n: u"District name %s" % n)
34 geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_district_iter.next()), srid=settings.SRID))
35
36
37 class RestrictedAreaTypeFactory(factory.DjangoModelFactory):
38
39 class Meta:
40 model = models.RestrictedAreaType
41
42 name = factory.Sequence(lambda n: u"Restricted name %s" % n)
43
44
45 class RestrictedAreaFactory(factory.DjangoModelFactory):
46 class Meta:
47 model = models.RestrictedArea
48
49 name = factory.Sequence(lambda n: u"Restricted area name %s" % n)
50 geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_area_iter.next()), srid=settings.SRID))
51 area_type = factory.SubFactory(RestrictedAreaTypeFactory)
52
53
54 class RestrictedAreaEdgeFactory(TopologyFactory):
55
56 class Meta:
57 model = models.RestrictedAreaEdge
58
59 restricted_area = factory.SubFactory(RestrictedAreaFactory)
60
61
62 class CityEdgeFactory(TopologyFactory):
63
64 class Meta:
65 model = models.CityEdge
66
67 city = factory.SubFactory(CityFactory)
68
69
70 class DistrictEdgeFactory(TopologyFactory):
71
72 class Meta:
73 model = models.DistrictEdge
74
75 district = factory.SubFactory(DistrictFactory)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/zoning/factories.py b/geotrek/zoning/factories.py
--- a/geotrek/zoning/factories.py
+++ b/geotrek/zoning/factories.py
@@ -11,10 +11,13 @@
from . import models
+# Don't intersect with geom from PathFactory
+SPATIAL_EXTENT = (200000, 300000, 1100000, 1200000)
+
# Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it
-geom_city_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)
-geom_district_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
-geom_area_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
+geom_city_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)
+geom_district_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
+geom_area_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)
class CityFactory(factory.DjangoModelFactory):
| {"golden_diff": "diff --git a/geotrek/zoning/factories.py b/geotrek/zoning/factories.py\n--- a/geotrek/zoning/factories.py\n+++ b/geotrek/zoning/factories.py\n@@ -11,10 +11,13 @@\n from . import models\n \n \n+# Don't intersect with geom from PathFactory\n+SPATIAL_EXTENT = (200000, 300000, 1100000, 1200000)\n+\n # Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it\n-geom_city_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)\n-geom_district_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n-geom_area_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n+geom_city_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)\n+geom_district_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n+geom_area_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n \n \n class CityFactory(factory.DjangoModelFactory):\n", "issue": "Installation issue\nHello,\r\nI'm trying to install Geotrek following the documentation, and I have some problems.\r\n\r\nAt the very beginnig, when I run the install.sh, the script can't find the `etc/setting.ini` file. I checked, and I have no `'etc'` folder at all... So the install aborted. \r\nI tried to create myself this folder and the `settings.ini` file with the variable expected (dbhost, dbname etc...). It works (the database is installed), but the install crash few step later when it try to install the python environnement. `Could not setup python environment !`\r\n\r\nDid I miss something in the installation documentation ?\r\nHow can I fix this problem ?\r\n\r\nThanks for your help\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport factory\n\nfrom django.conf import settings\nfrom django.contrib.gis.geos import Polygon, MultiPolygon\n\nfrom mapentity.helpers import bbox_split_srid_2154\n\nfrom geotrek.core.factories import TopologyFactory\n\nfrom . import models\n\n\n# Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it\ngeom_city_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)\ngeom_district_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\ngeom_area_iter = bbox_split_srid_2154(settings.SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n\n\nclass CityFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.City\n\n code = factory.Sequence(lambda n: u\"#%s\" % n) # id (!) 
with max_length=6\n name = factory.Sequence(lambda n: u\"City name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_city_iter.next()), srid=settings.SRID))\n\n\nclass DistrictFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.District\n\n name = factory.Sequence(lambda n: u\"District name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_district_iter.next()), srid=settings.SRID))\n\n\nclass RestrictedAreaTypeFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = models.RestrictedAreaType\n\n name = factory.Sequence(lambda n: u\"Restricted name %s\" % n)\n\n\nclass RestrictedAreaFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.RestrictedArea\n\n name = factory.Sequence(lambda n: u\"Restricted area name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_area_iter.next()), srid=settings.SRID))\n area_type = factory.SubFactory(RestrictedAreaTypeFactory)\n\n\nclass RestrictedAreaEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.RestrictedAreaEdge\n\n restricted_area = factory.SubFactory(RestrictedAreaFactory)\n\n\nclass CityEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.CityEdge\n\n city = factory.SubFactory(CityFactory)\n\n\nclass DistrictEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.DistrictEdge\n\n district = factory.SubFactory(DistrictFactory)\n", "path": "geotrek/zoning/factories.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport factory\n\nfrom django.conf import settings\nfrom django.contrib.gis.geos import Polygon, MultiPolygon\n\nfrom mapentity.helpers import bbox_split_srid_2154\n\nfrom geotrek.core.factories import TopologyFactory\n\nfrom . import models\n\n\n# Don't intersect with geom from PathFactory\nSPATIAL_EXTENT = (200000, 300000, 1100000, 1200000)\n\n# Create 16 cities and 4 districts distinct same-area zone covering the spatial_extent and cycle on it\ngeom_city_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=4, by_y=4, cycle=True)\ngeom_district_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\ngeom_area_iter = bbox_split_srid_2154(SPATIAL_EXTENT, by_x=2, by_y=2, cycle=True)\n\n\nclass CityFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.City\n\n code = factory.Sequence(lambda n: u\"#%s\" % n) # id (!) 
with max_length=6\n name = factory.Sequence(lambda n: u\"City name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_city_iter.next()), srid=settings.SRID))\n\n\nclass DistrictFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.District\n\n name = factory.Sequence(lambda n: u\"District name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_district_iter.next()), srid=settings.SRID))\n\n\nclass RestrictedAreaTypeFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = models.RestrictedAreaType\n\n name = factory.Sequence(lambda n: u\"Restricted name %s\" % n)\n\n\nclass RestrictedAreaFactory(factory.DjangoModelFactory):\n class Meta:\n model = models.RestrictedArea\n\n name = factory.Sequence(lambda n: u\"Restricted area name %s\" % n)\n geom = factory.Sequence(lambda _: MultiPolygon(Polygon.from_bbox(geom_area_iter.next()), srid=settings.SRID))\n area_type = factory.SubFactory(RestrictedAreaTypeFactory)\n\n\nclass RestrictedAreaEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.RestrictedAreaEdge\n\n restricted_area = factory.SubFactory(RestrictedAreaFactory)\n\n\nclass CityEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.CityEdge\n\n city = factory.SubFactory(CityFactory)\n\n\nclass DistrictEdgeFactory(TopologyFactory):\n\n class Meta:\n model = models.DistrictEdge\n\n district = factory.SubFactory(DistrictFactory)\n", "path": "geotrek/zoning/factories.py"}]} | 1,146 | 350 |
gh_patches_debug_52443 | rasdani/github-patches | git_diff | ipython__ipython-3901 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
under Windows, "ipython3 nbconvert "C:/blabla/first_try.ipynb" --to latex --post PDF" POST processing action fails because of a bad parameter
Hello,
The "one single step" option to create a ".pdf" from a .ipynb" fails on my windows python3 pc
Nbconvert apparently tries to compile the ".TEX" result with
"pdflatex .\first_try.tex"
==> It triggers bad behaviour in pdflatex, which picks the "pdfTex" option instead of "PdfLatex".
The working option, on my Windows PC and when I do it by hand, is not to put the ".\"
"pdflatex first_try.tex"
UPDATE: replacing ".\" with "./" also seems to be a solution.
"pdflatex ./first_try.tex"
A hint to the problem comes from here:
http://tex.stackexchange.com/questions/78178/miktex-how-to-run-pdflatex-from-cmd-prompt-on-windows-7-compared-to-windows-xp
Details below.
Cheers
*** instruction ***
ipython3 nbconvert "C:/blabla/first_try.ipynb" --to latex --post PDF"
*** (start of the output) ***
C:\Users\parent\Desktop\winpython\WinPython-32bit-3.3.2.1rc1\python-3.3.2>ipytho
n3 nbconvert "C:/blabla//first_try.ipynb" --to latex --po
st PDF
[NbConvertApp] Using existing profile dir: 'C:\Users\parent\Desktop\winpytho
n\WinPython-32bit-3.3.2.1rc1\settings\.ipython\profile_default'
[NbConvertApp] Converting notebook C:/blabla/first_try.ipynb to latex
[NbConvertApp] Support files will be in first_try_files\
[NbConvertApp] Loaded template latex_article.tplx
[NbConvertApp] Writing 53680 bytes to .\first_try.tex
[NbConvertApp] Building PDF: `pdflatex .\first_try.tex`
This is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/W32TeX)
restricted \write18 enabled.
entering extended mode
! Undefined control sequence.
<_> .\first
_try.tex
?
*** (end of the output) ***
--- END ISSUE ---
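For illustration, a minimal sketch (assuming CPython's `os.path` semantics; the file and directory names are just placeholders) of how the `.\` prefix arises when an output writer joins a `"."` build directory with the target file name, and why an empty directory string avoids it:

```python
import os

# Joining a "." build directory with the file name keeps the "./" (".\" on Windows)
# prefix in the resulting path.
dest_with_dot = os.path.join(".", "first_try.tex")  # ".\\first_try.tex" on Windows
dest_plain = os.path.join("", "first_try.tex")       # "first_try.tex" on any platform

print(dest_with_dot)
print(dest_plain)
```

On Windows the first form is what produces the `pdflatex .\first_try.tex` invocation shown above, while the second form drops the prefix entirely.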
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/nbconvert/writers/files.py`
Content:
```
1 """
2 Contains writer for writing nbconvert output to filesystem.
3 """
4 #-----------------------------------------------------------------------------
5 #Copyright (c) 2013, the IPython Development Team.
6 #
7 #Distributed under the terms of the Modified BSD License.
8 #
9 #The full license is in the file COPYING.txt, distributed with this software.
10 #-----------------------------------------------------------------------------
11
12 #-----------------------------------------------------------------------------
13 # Imports
14 #-----------------------------------------------------------------------------
15
16 import io
17 import os
18 import glob
19
20 from IPython.utils.traitlets import Unicode
21 from IPython.utils.path import link_or_copy
22
23 from .base import WriterBase
24
25 #-----------------------------------------------------------------------------
26 # Classes
27 #-----------------------------------------------------------------------------
28
29 class FilesWriter(WriterBase):
30 """Consumes nbconvert output and produces files."""
31
32
33 build_directory = Unicode(".", config=True,
34 help="""Directory to write output to. Leave blank
35 to output to the current directory""")
36
37
38 # Make sure that the output directory exists.
39 def _build_directory_changed(self, name, old, new):
40 if new and not os.path.isdir(new):
41 os.makedirs(new)
42
43
44 def __init__(self, **kw):
45 super(FilesWriter, self).__init__(**kw)
46 self._build_directory_changed('build_directory', self.build_directory,
47 self.build_directory)
48
49 def _makedir(self, path):
50 """Make a directory if it doesn't already exist"""
51 if not os.path.isdir(path):
52 self.log.info("Making directory %s", path)
53 os.makedirs(path)
54
55 def write(self, output, resources, notebook_name=None, **kw):
56 """
57 Consume and write Jinja output to the file system. Output directory
58 is set via the 'build_directory' variable of this instance (a
59 configurable).
60
61 See base for more...
62 """
63
64 # Pull the extension and subdir from the resources dict.
65 output_extension = resources['output_extension']
66
67 # Write all of the extracted resources to the destination directory.
68 # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. THE EXTRACT FIG
69 # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...
70 for filename, data in resources.get('outputs', {}).items():
71
72 # Determine where to write the file to
73 dest = os.path.join(self.build_directory, filename)
74 path = os.path.dirname(dest)
75 self._makedir(path)
76
77 # Write file
78 self.log.debug("Writing %i bytes to support file %s", len(data), dest)
79 with io.open(dest, 'wb') as f:
80 f.write(data)
81
82 # Copy referenced files to output directory
83 if self.build_directory:
84 for filename in self.files:
85
86 # Copy files that match search pattern
87 for matching_filename in glob.glob(filename):
88
89 # Make sure folder exists.
90 dest = os.path.join(self.build_directory, filename)
91 path = os.path.dirname(dest)
92 self._makedir(path)
93
94 # Copy if destination is different.
95 if not os.path.normpath(dest) == os.path.normpath(matching_filename):
96 self.log.info("Linking %s -> %s", matching_filename, dest)
97 link_or_copy(matching_filename, dest)
98
99 # Determine where to write conversion results.
100 dest = notebook_name + '.' + output_extension
101 if self.build_directory:
102 dest = os.path.join(self.build_directory, dest)
103
104 # Write conversion results.
105 self.log.info("Writing %i bytes to %s", len(output), dest)
106 with io.open(dest, 'w', encoding='utf-8') as f:
107 f.write(output)
108 return dest
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/nbconvert/writers/files.py b/IPython/nbconvert/writers/files.py
--- a/IPython/nbconvert/writers/files.py
+++ b/IPython/nbconvert/writers/files.py
@@ -30,7 +30,7 @@
"""Consumes nbconvert output and produces files."""
- build_directory = Unicode(".", config=True,
+ build_directory = Unicode("", config=True,
help="""Directory to write output to. Leave blank
to output to the current directory""")
| {"golden_diff": "diff --git a/IPython/nbconvert/writers/files.py b/IPython/nbconvert/writers/files.py\n--- a/IPython/nbconvert/writers/files.py\n+++ b/IPython/nbconvert/writers/files.py\n@@ -30,7 +30,7 @@\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n \n \n- build_directory = Unicode(\".\", config=True, \n+ build_directory = Unicode(\"\", config=True,\n help=\"\"\"Directory to write output to. Leave blank\n to output to the current directory\"\"\")\n", "issue": "under Windows, \"ipython3 nbconvert \"C:/blabla/first_try.ipynb\" --to latex --post PDF\" POST processing action fails because of a bad parameter\nHello,\n\nThe \"one single step\" option to create a \".pdf\" from a .ipynb\" fails on my windows python3 pc \n\nNbconvert apparently tries compile \".TEX\" result with \n\n\"pdflatex .\\first_try.tex\" \n\n==> It generates a bad behaviour of pdflatex, which picks \"pdfTex\" option instead of \"PdfLatex\".\n\nThe working option, on my Windows PC and when I do it by hand, is not to put the \".\\\" \n\n\"pdflatex first_try.tex\" \n\nUPDATE : replacing \".\\\" per \"./\" seems also to be a solution.\n\"pdflatex ./first_try.tex\" \n\nHint to the problem comes from here \nhttp://tex.stackexchange.com/questions/78178/miktex-how-to-run-pdflatex-from-cmd-prompt-on-windows-7-compared-to-windows-xp \n\nDetails below.\n\nSheers\n\n*\\* instruction *\\* \nipython3 nbconvert \"C:/blabla/first_try.ipynb\" --to latex --post PDF\"\n\n**\\* (start of the output ) ***\nC:\\Users\\parent\\Desktop\\winpython\\WinPython-32bit-3.3.2.1rc1\\python-3.3.2>ipytho\nn3 nbconvert \"C:/blabla//first_try.ipynb\" --to latex --po\nst PDF\n[NbConvertApp] Using existing profile dir: 'C:\\Users\\parent\\Desktop\\winpytho\nn\\WinPython-32bit-3.3.2.1rc1\\settings\\.ipython\\profile_default'\n[NbConvertApp] Converting notebook C:/blabla/first_try.ipynb to latex\n[NbConvertApp] Support files will be in first_try_files\\\n[NbConvertApp] Loaded template latex_article.tplx\n[NbConvertApp] Writing 53680 bytes to .\\first_try.tex\n[NbConvertApp] Building PDF: `pdflatex .\\first_try.tex`\nThis is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/W32TeX)\n restricted \\write18 enabled.\nentering extended mode\n! Undefined control sequence.\n<_> .\\first\n _try.tex\n?\n*_\\* (end of the output ) ***\n\n", "before_files": [{"content": "\"\"\"\nContains writer for writing nbconvert output to filesystem.\n\"\"\"\n#-----------------------------------------------------------------------------\n#Copyright (c) 2013, the IPython Development Team.\n#\n#Distributed under the terms of the Modified BSD License.\n#\n#The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport io\nimport os\nimport glob\n\nfrom IPython.utils.traitlets import Unicode\nfrom IPython.utils.path import link_or_copy\n\nfrom .base import WriterBase\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass FilesWriter(WriterBase):\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n\n\n build_directory = Unicode(\".\", config=True, \n help=\"\"\"Directory to write output to. 
Leave blank\n to output to the current directory\"\"\")\n\n\n # Make sure that the output directory exists.\n def _build_directory_changed(self, name, old, new):\n if new and not os.path.isdir(new):\n os.makedirs(new)\n\n\n def __init__(self, **kw):\n super(FilesWriter, self).__init__(**kw)\n self._build_directory_changed('build_directory', self.build_directory, \n self.build_directory)\n \n def _makedir(self, path):\n \"\"\"Make a directory if it doesn't already exist\"\"\"\n if not os.path.isdir(path):\n self.log.info(\"Making directory %s\", path)\n os.makedirs(path)\n\n def write(self, output, resources, notebook_name=None, **kw):\n \"\"\"\n Consume and write Jinja output to the file system. Output directory\n is set via the 'build_directory' variable of this instance (a \n configurable).\n\n See base for more...\n \"\"\"\n\n # Pull the extension and subdir from the resources dict.\n output_extension = resources['output_extension']\n\n # Write all of the extracted resources to the destination directory.\n # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. THE EXTRACT FIG\n # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...\n for filename, data in resources.get('outputs', {}).items():\n\n # Determine where to write the file to\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Write file\n self.log.debug(\"Writing %i bytes to support file %s\", len(data), dest)\n with io.open(dest, 'wb') as f:\n f.write(data)\n\n # Copy referenced files to output directory\n if self.build_directory:\n for filename in self.files:\n\n # Copy files that match search pattern\n for matching_filename in glob.glob(filename):\n\n # Make sure folder exists.\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Copy if destination is different.\n if not os.path.normpath(dest) == os.path.normpath(matching_filename):\n self.log.info(\"Linking %s -> %s\", matching_filename, dest)\n link_or_copy(matching_filename, dest)\n\n # Determine where to write conversion results.\n dest = notebook_name + '.' 
+ output_extension\n if self.build_directory:\n dest = os.path.join(self.build_directory, dest)\n\n # Write conversion results.\n self.log.info(\"Writing %i bytes to %s\", len(output), dest)\n with io.open(dest, 'w', encoding='utf-8') as f:\n f.write(output)\n return dest", "path": "IPython/nbconvert/writers/files.py"}], "after_files": [{"content": "\"\"\"\nContains writer for writing nbconvert output to filesystem.\n\"\"\"\n#-----------------------------------------------------------------------------\n#Copyright (c) 2013, the IPython Development Team.\n#\n#Distributed under the terms of the Modified BSD License.\n#\n#The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport io\nimport os\nimport glob\n\nfrom IPython.utils.traitlets import Unicode\nfrom IPython.utils.path import link_or_copy\n\nfrom .base import WriterBase\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass FilesWriter(WriterBase):\n \"\"\"Consumes nbconvert output and produces files.\"\"\"\n\n\n build_directory = Unicode(\"\", config=True,\n help=\"\"\"Directory to write output to. Leave blank\n to output to the current directory\"\"\")\n\n\n # Make sure that the output directory exists.\n def _build_directory_changed(self, name, old, new):\n if new and not os.path.isdir(new):\n os.makedirs(new)\n\n\n def __init__(self, **kw):\n super(FilesWriter, self).__init__(**kw)\n self._build_directory_changed('build_directory', self.build_directory, \n self.build_directory)\n \n def _makedir(self, path):\n \"\"\"Make a directory if it doesn't already exist\"\"\"\n if not os.path.isdir(path):\n self.log.info(\"Making directory %s\", path)\n os.makedirs(path)\n\n def write(self, output, resources, notebook_name=None, **kw):\n \"\"\"\n Consume and write Jinja output to the file system. Output directory\n is set via the 'build_directory' variable of this instance (a \n configurable).\n\n See base for more...\n \"\"\"\n\n # Pull the extension and subdir from the resources dict.\n output_extension = resources['output_extension']\n\n # Write all of the extracted resources to the destination directory.\n # NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. 
THE EXTRACT FIG\n # TRANSFORMER SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...\n for filename, data in resources.get('outputs', {}).items():\n\n # Determine where to write the file to\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Write file\n self.log.debug(\"Writing %i bytes to support file %s\", len(data), dest)\n with io.open(dest, 'wb') as f:\n f.write(data)\n\n # Copy referenced files to output directory\n if self.build_directory:\n for filename in self.files:\n\n # Copy files that match search pattern\n for matching_filename in glob.glob(filename):\n\n # Make sure folder exists.\n dest = os.path.join(self.build_directory, filename)\n path = os.path.dirname(dest)\n self._makedir(path)\n\n # Copy if destination is different.\n if not os.path.normpath(dest) == os.path.normpath(matching_filename):\n self.log.info(\"Linking %s -> %s\", matching_filename, dest)\n link_or_copy(matching_filename, dest)\n\n # Determine where to write conversion results.\n dest = notebook_name + '.' + output_extension\n if self.build_directory:\n dest = os.path.join(self.build_directory, dest)\n\n # Write conversion results.\n self.log.info(\"Writing %i bytes to %s\", len(output), dest)\n with io.open(dest, 'w', encoding='utf-8') as f:\n f.write(output)\n return dest", "path": "IPython/nbconvert/writers/files.py"}]} | 1,801 | 114 |
gh_patches_debug_40745 | rasdani/github-patches | git_diff | svthalia__concrexit-3652 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The same names with different capitalisation are seen as different
### Describe the bug
When claiming promo requests in the admin site, if the claimant name is entered twice, first without a capital letter and then with one, it is counted as two different persons.
### Expected behaviour
The same name with different capitalisation should still count as the same name.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/promotion/admin.py`
Content:
```
1 """Registers admin interfaces for the models defined in this module."""
2 from django.contrib import admin
3 from django.contrib.admin import ModelAdmin
4
5 from events.services import is_organiser
6 from promotion.forms import PromotionRequestForm
7
8 from .models import PromotionChannel, PromotionRequest
9
10
11 @admin.register(PromotionRequest)
12 class PromotionRequestAdmin(admin.ModelAdmin):
13 """This manages the admin interface for the model items."""
14
15 list_display = ("event", "publish_date", "channel", "assigned_to", "status")
16 list_filter = (
17 "publish_date",
18 "assigned_to",
19 "status",
20 )
21 date_hierarchy = "publish_date"
22 form = PromotionRequestForm
23 actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"]
24
25 def has_change_permission(self, request, obj=None):
26 if obj is not None and obj.event and is_organiser(request.member, obj.event):
27 return True
28 return super().has_change_permission(request, obj)
29
30 def mark_not_started(self, request, queryset):
31 """Change the status of the event to published."""
32 self._change_published(queryset, PromotionRequest.NOT_STARTED)
33
34 mark_not_started.short_description = "Mark requests as not started"
35
36 def mark_started(self, request, queryset):
37 """Change the status of the event to published."""
38 self._change_published(queryset, PromotionRequest.STARTED)
39
40 mark_started.short_description = "Mark requests as started"
41
42 def mark_finished(self, request, queryset):
43 """Change the status of the event to published."""
44 self._change_published(queryset, PromotionRequest.FINISHED)
45
46 mark_finished.short_description = "Mark requests as finished"
47
48 def mark_published(self, request, queryset):
49 """Change the status of the event to published."""
50 self._change_published(queryset, PromotionRequest.PUBLISHED)
51
52 mark_published.short_description = "Mark requests as published"
53
54 @staticmethod
55 def _change_published(queryset, status):
56 queryset.update(status=status)
57
58
59 @admin.register(PromotionChannel)
60 class PromotionChannelAdmin(ModelAdmin):
61 list_display = ("name", "publisher_reminder_email")
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/promotion/admin.py b/website/promotion/admin.py
--- a/website/promotion/admin.py
+++ b/website/promotion/admin.py
@@ -1,6 +1,9 @@
"""Registers admin interfaces for the models defined in this module."""
+
from django.contrib import admin
from django.contrib.admin import ModelAdmin
+from django.db import models
+from django.db.models.functions import Lower
from events.services import is_organiser
from promotion.forms import PromotionRequestForm
@@ -8,6 +11,75 @@
from .models import PromotionChannel, PromotionRequest
+class CaseInsensitiveFilter(admin.FieldListFilter):
+ def __init__(self, field, request, params, model, model_admin, field_path):
+ self.lookup_kwarg = f"{field_path}__iexact"
+ self.lookup_kwarg2 = f"{field_path}__isnull"
+ self.lookup_val = params.get(self.lookup_kwarg)
+ self.lookup_val2 = params.get(self.lookup_kwarg2)
+ super().__init__(field, request, params, model, model_admin, field_path)
+ self.empty_value_display = model_admin.get_empty_value_display()
+ queryset = model_admin.get_queryset(request)
+ lookup_choices = (
+ queryset.annotate(lowered=Lower(field.name))
+ .order_by(field.name)
+ .distinct()
+ .values_list(field.name, flat=True)
+ )
+ self.lookup_choices = set(
+ map(lambda x: x.lower() if x is not None else x, lookup_choices)
+ )
+
+ def get_facet_counts(self, pk_attname, filtered_qs):
+ return {
+ f"{i}__c": models.Count(
+ pk_attname,
+ filter=models.Q(
+ (self.lookup_kwarg, value)
+ if value is not None
+ else (self.lookup_kwarg2, True)
+ ),
+ )
+ for i, value in enumerate(self.lookup_choices)
+ }
+
+ def choices(self, changelist):
+ add_facets = changelist.add_facets
+ facet_counts = self.get_facet_queryset(changelist)
+ yield {
+ "selected": self.lookup_val is None,
+ "query_string": changelist.get_query_string(
+ remove=[self.lookup_kwarg, self.lookup_kwarg2]
+ ),
+ "display": "All",
+ }
+ include_none = False
+ empty_title = self.empty_value_display
+ for key, val in enumerate(self.lookup_choices):
+ if add_facets:
+ count = facet_counts[f"{key}__c"]
+ if val is None:
+ include_none = True
+ empty_title = f"{empty_title} ({count})" if add_facets else empty_title
+ continue
+ yield {
+ "selected": self.lookup_val is not None and val in self.lookup_val,
+ "query_string": changelist.get_query_string({self.lookup_kwarg: val}),
+ "display": f"{val} ({count})" if add_facets else val,
+ }
+ if include_none:
+ yield {
+ "selected": self.lookup_val2 is True,
+ "query_string": changelist.get_query_string(
+ {self.lookup_kwarg2: "True"}, remove=[self.lookup_kwarg]
+ ),
+ "display": empty_title,
+ }
+
+ def expected_parameters(self):
+ return [self.lookup_kwarg, self.lookup_kwarg2]
+
+
@admin.register(PromotionRequest)
class PromotionRequestAdmin(admin.ModelAdmin):
"""This manages the admin interface for the model items."""
@@ -15,7 +87,7 @@
list_display = ("event", "publish_date", "channel", "assigned_to", "status")
list_filter = (
"publish_date",
- "assigned_to",
+ ("assigned_to", CaseInsensitiveFilter),
"status",
)
date_hierarchy = "publish_date"
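Editor's note: the patch above works by lower-casing the distinct filter choices and then matching the selected value with a case-insensitive (`__iexact`) lookup. The following is a minimal plain-Python sketch of that idea, independent of Django; the names and values are illustrative only.

```python
# Sketch only: reproduces the de-duplication and matching logic of the
# CaseInsensitiveFilter without a database or Django.
names = ["Alice", "alice", "Bob", None]

# Build filter choices the way the patch does: lower-case non-None values,
# then deduplicate, so "Alice" and "alice" collapse into a single choice.
choices = {n.lower() if n is not None else None for n in names}
assert choices == {"alice", "bob", None}

# Selecting a choice behaves like an __iexact lookup: both spellings match.
selected = "alice"
matches = [n for n in names if n is not None and n.lower() == selected]
assert matches == ["Alice", "alice"]
```

The real filter additionally handles the empty choice through an `__isnull` lookup, as the diff above shows.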
| {"golden_diff": "diff --git a/website/promotion/admin.py b/website/promotion/admin.py\n--- a/website/promotion/admin.py\n+++ b/website/promotion/admin.py\n@@ -1,6 +1,9 @@\n \"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\n+\n from django.contrib import admin\n from django.contrib.admin import ModelAdmin\n+from django.db import models\n+from django.db.models.functions import Lower\n \n from events.services import is_organiser\n from promotion.forms import PromotionRequestForm\n@@ -8,6 +11,75 @@\n from .models import PromotionChannel, PromotionRequest\n \n \n+class CaseInsensitiveFilter(admin.FieldListFilter):\n+ def __init__(self, field, request, params, model, model_admin, field_path):\n+ self.lookup_kwarg = f\"{field_path}__iexact\"\n+ self.lookup_kwarg2 = f\"{field_path}__isnull\"\n+ self.lookup_val = params.get(self.lookup_kwarg)\n+ self.lookup_val2 = params.get(self.lookup_kwarg2)\n+ super().__init__(field, request, params, model, model_admin, field_path)\n+ self.empty_value_display = model_admin.get_empty_value_display()\n+ queryset = model_admin.get_queryset(request)\n+ lookup_choices = (\n+ queryset.annotate(lowered=Lower(field.name))\n+ .order_by(field.name)\n+ .distinct()\n+ .values_list(field.name, flat=True)\n+ )\n+ self.lookup_choices = set(\n+ map(lambda x: x.lower() if x is not None else x, lookup_choices)\n+ )\n+\n+ def get_facet_counts(self, pk_attname, filtered_qs):\n+ return {\n+ f\"{i}__c\": models.Count(\n+ pk_attname,\n+ filter=models.Q(\n+ (self.lookup_kwarg, value)\n+ if value is not None\n+ else (self.lookup_kwarg2, True)\n+ ),\n+ )\n+ for i, value in enumerate(self.lookup_choices)\n+ }\n+\n+ def choices(self, changelist):\n+ add_facets = changelist.add_facets\n+ facet_counts = self.get_facet_queryset(changelist)\n+ yield {\n+ \"selected\": self.lookup_val is None,\n+ \"query_string\": changelist.get_query_string(\n+ remove=[self.lookup_kwarg, self.lookup_kwarg2]\n+ ),\n+ \"display\": \"All\",\n+ }\n+ include_none = False\n+ empty_title = self.empty_value_display\n+ for key, val in enumerate(self.lookup_choices):\n+ if add_facets:\n+ count = facet_counts[f\"{key}__c\"]\n+ if val is None:\n+ include_none = True\n+ empty_title = f\"{empty_title} ({count})\" if add_facets else empty_title\n+ continue\n+ yield {\n+ \"selected\": self.lookup_val is not None and val in self.lookup_val,\n+ \"query_string\": changelist.get_query_string({self.lookup_kwarg: val}),\n+ \"display\": f\"{val} ({count})\" if add_facets else val,\n+ }\n+ if include_none:\n+ yield {\n+ \"selected\": self.lookup_val2 is True,\n+ \"query_string\": changelist.get_query_string(\n+ {self.lookup_kwarg2: \"True\"}, remove=[self.lookup_kwarg]\n+ ),\n+ \"display\": empty_title,\n+ }\n+\n+ def expected_parameters(self):\n+ return [self.lookup_kwarg, self.lookup_kwarg2]\n+\n+\n @admin.register(PromotionRequest)\n class PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n@@ -15,7 +87,7 @@\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n- \"assigned_to\",\n+ (\"assigned_to\", CaseInsensitiveFilter),\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n", "issue": "The same names with different capitalisation are seen as different\n### Describe the bug\r\nWhen claiming promo requests in the admin site, if the claimant name is entered twice, first without capital and then with one. 
It is counted as two different persons.\r\n\r\n### Expected behaviour\r\nThe same name with different capitalisation should still count as the same name.\r\n\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom events.services import is_organiser\nfrom promotion.forms import PromotionRequestForm\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and obj.event and is_organiser(request.member, obj.event):\n return True\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n list_display = (\"name\", \"publisher_reminder_email\")\n", "path": "website/promotion/admin.py"}], "after_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\n\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\nfrom django.db import models\nfrom django.db.models.functions import Lower\n\nfrom events.services import is_organiser\nfrom promotion.forms import PromotionRequestForm\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\nclass CaseInsensitiveFilter(admin.FieldListFilter):\n def __init__(self, field, request, params, model, model_admin, field_path):\n self.lookup_kwarg = f\"{field_path}__iexact\"\n self.lookup_kwarg2 = f\"{field_path}__isnull\"\n self.lookup_val = params.get(self.lookup_kwarg)\n self.lookup_val2 = params.get(self.lookup_kwarg2)\n super().__init__(field, request, params, model, model_admin, field_path)\n self.empty_value_display = model_admin.get_empty_value_display()\n queryset = model_admin.get_queryset(request)\n lookup_choices = (\n queryset.annotate(lowered=Lower(field.name))\n .order_by(field.name)\n .distinct()\n .values_list(field.name, flat=True)\n )\n self.lookup_choices = set(\n map(lambda x: 
x.lower() if x is not None else x, lookup_choices)\n )\n\n def get_facet_counts(self, pk_attname, filtered_qs):\n return {\n f\"{i}__c\": models.Count(\n pk_attname,\n filter=models.Q(\n (self.lookup_kwarg, value)\n if value is not None\n else (self.lookup_kwarg2, True)\n ),\n )\n for i, value in enumerate(self.lookup_choices)\n }\n\n def choices(self, changelist):\n add_facets = changelist.add_facets\n facet_counts = self.get_facet_queryset(changelist)\n yield {\n \"selected\": self.lookup_val is None,\n \"query_string\": changelist.get_query_string(\n remove=[self.lookup_kwarg, self.lookup_kwarg2]\n ),\n \"display\": \"All\",\n }\n include_none = False\n empty_title = self.empty_value_display\n for key, val in enumerate(self.lookup_choices):\n if add_facets:\n count = facet_counts[f\"{key}__c\"]\n if val is None:\n include_none = True\n empty_title = f\"{empty_title} ({count})\" if add_facets else empty_title\n continue\n yield {\n \"selected\": self.lookup_val is not None and val in self.lookup_val,\n \"query_string\": changelist.get_query_string({self.lookup_kwarg: val}),\n \"display\": f\"{val} ({count})\" if add_facets else val,\n }\n if include_none:\n yield {\n \"selected\": self.lookup_val2 is True,\n \"query_string\": changelist.get_query_string(\n {self.lookup_kwarg2: \"True\"}, remove=[self.lookup_kwarg]\n ),\n \"display\": empty_title,\n }\n\n def expected_parameters(self):\n return [self.lookup_kwarg, self.lookup_kwarg2]\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n (\"assigned_to\", CaseInsensitiveFilter),\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and obj.event and is_organiser(request.member, obj.event):\n return True\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n list_display = (\"name\", \"publisher_reminder_email\")\n", "path": "website/promotion/admin.py"}]} | 904 | 874 |
gh_patches_debug_19388 | rasdani/github-patches | git_diff | deepset-ai__haystack-3630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TCP port in `launch_opensearch()` is different from default value in `OpenSearchDocumentStore`
In `launch_opensearch()` we are starting an OpenSearch container using the port `9201`. The default port for `OpenSearchDocumentStore` is currently `9200`. I think we should align those two values.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/utils/doc_store.py`
Content:
```
1 # pylint: disable=missing-timeout
2
3 import time
4 import logging
5 import subprocess
6 from pathlib import Path
7
8 import requests
9
10
11 logger = logging.getLogger(__name__)
12 ELASTICSEARCH_CONTAINER_NAME = "elasticsearch"
13 OPENSEARCH_CONTAINER_NAME = "opensearch"
14 WEAVIATE_CONTAINER_NAME = "weaviate"
15
16
17 def launch_es(sleep=15, delete_existing=False):
18 """
19 Start an Elasticsearch server via Docker.
20 """
21
22 logger.debug("Starting Elasticsearch ...")
23 if delete_existing:
24 _ = subprocess.run([f"docker rm --force {ELASTICSEARCH_CONTAINER_NAME}"], shell=True, stdout=subprocess.DEVNULL)
25 status = subprocess.run(
26 [
27 f'docker start {ELASTICSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9200:9200 -e "discovery.type=single-node" --name {ELASTICSEARCH_CONTAINER_NAME} elasticsearch:7.9.2'
28 ],
29 shell=True,
30 )
31 if status.returncode:
32 logger.warning(
33 "Tried to start Elasticsearch through Docker but this failed. "
34 "It is likely that there is already an existing Elasticsearch instance running. "
35 )
36 else:
37 time.sleep(sleep)
38
39
40 def launch_opensearch(sleep=15, delete_existing=False):
41 """
42 Start an OpenSearch server via Docker.
43 """
44 logger.debug("Starting OpenSearch...")
45 # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now
46 # docker rm only succeeds if the container is stopped, not if it is running
47 if delete_existing:
48 _ = subprocess.run([f"docker rm --force {OPENSEARCH_CONTAINER_NAME}"], shell=True, stdout=subprocess.DEVNULL)
49 status = subprocess.run(
50 [
51 f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e "discovery.type=single-node" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'
52 ],
53 shell=True,
54 )
55 if status.returncode:
56 logger.warning(
57 "Tried to start OpenSearch through Docker but this failed. "
58 "It is likely that there is already an existing OpenSearch instance running. "
59 )
60 else:
61 time.sleep(sleep)
62
63
64 def launch_weaviate(sleep=15):
65 """
66 Start a Weaviate server via Docker.
67 """
68
69 logger.debug("Starting Weaviate ...")
70 status = subprocess.run(
71 [
72 f"docker start {WEAVIATE_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 8080:8080 --env AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED='true' --env PERSISTENCE_DATA_PATH='/var/lib/weaviate' --name {WEAVIATE_CONTAINER_NAME} semitechnologies/weaviate:latest"
73 ],
74 shell=True,
75 )
76 if status.returncode:
77 logger.warning(
78 "Tried to start Weaviate through Docker but this failed. "
79 "It is likely that there is already an existing Weaviate instance running. "
80 )
81 else:
82 time.sleep(sleep)
83
84
85 def stop_container(container_name, delete_container=False):
86 logger.debug("Stopping %s...", container_name)
87 status = subprocess.run([f"docker stop {container_name}"], shell=True)
88 if status.returncode:
89 logger.warning(
90 f"Tried to stop {container_name} but this failed. "
91 f"It is likely that there was no Docker container with the name {container_name}"
92 )
93 if delete_container:
94 status = subprocess.run([f"docker rm {container_name}"], shell=True)
95
96
97 def stop_opensearch(delete_container=False):
98 stop_container(OPENSEARCH_CONTAINER_NAME, delete_container)
99
100
101 def stop_elasticsearch(delete_container=False):
102 stop_container(ELASTICSEARCH_CONTAINER_NAME, delete_container)
103
104
105 def stop_weaviate(delete_container=False):
106 stop_container(WEAVIATE_CONTAINER_NAME, delete_container)
107
108
109 def stop_service(document_store, delete_container=False):
110 ds_class = str(type(document_store))
111 if "OpenSearchDocumentStore" in ds_class:
112 stop_opensearch(delete_container)
113 elif "ElasticsearchDocumentStore" in ds_class:
114 stop_elasticsearch(delete_container)
115 elif "WeaviateDocumentStore" in ds_class:
116 stop_weaviate(delete_container)
117 else:
118 logger.warning("No support yet for auto stopping the service behind a %s", type(document_store))
119
120
121 def launch_milvus(sleep=15, delete_existing=False):
122 """
123 Start a Milvus server via Docker
124 """
125 logger.debug("Starting Milvus ...")
126
127 milvus_dir = Path.home() / "milvus"
128 milvus_dir.mkdir(exist_ok=True)
129
130 request = requests.get(
131 "https://github.com/milvus-io/milvus/releases/download/v2.0.0/milvus-standalone-docker-compose.yml"
132 )
133 with open(milvus_dir / "docker-compose.yml", "wb") as f:
134 f.write(request.content)
135
136 status = subprocess.run(["cd /home/$USER/milvus/ && docker-compose up -d"], shell=True)
137
138 if status.returncode:
139 logger.warning(
140 "Tried to start Milvus through Docker but this failed. "
141 "It is likely that there is already an existing Milvus instance running. "
142 )
143 else:
144 time.sleep(sleep)
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/utils/doc_store.py b/haystack/utils/doc_store.py
--- a/haystack/utils/doc_store.py
+++ b/haystack/utils/doc_store.py
@@ -37,7 +37,7 @@
time.sleep(sleep)
-def launch_opensearch(sleep=15, delete_existing=False):
+def launch_opensearch(sleep=15, delete_existing=False, local_port=9200):
"""
Start an OpenSearch server via Docker.
"""
@@ -48,7 +48,7 @@
_ = subprocess.run([f"docker rm --force {OPENSEARCH_CONTAINER_NAME}"], shell=True, stdout=subprocess.DEVNULL)
status = subprocess.run(
[
- f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e "discovery.type=single-node" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'
+ f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p {local_port}:9200 -p 9600:9600 -e "discovery.type=single-node" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'
],
shell=True,
)
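Editor's note: with the `local_port` parameter added above, the Docker helper and the document store can be pointed at the same port. Below is a hedged usage sketch; the import paths assume Haystack 1.x and may differ in other versions.

```python
# Sketch only: launch the container on the document store's default port (9200).
from haystack.utils import launch_opensearch
from haystack.document_stores import OpenSearchDocumentStore

launch_opensearch(sleep=30, delete_existing=True, local_port=9200)
store = OpenSearchDocumentStore(host="localhost", port=9200)
```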
| {"golden_diff": "diff --git a/haystack/utils/doc_store.py b/haystack/utils/doc_store.py\n--- a/haystack/utils/doc_store.py\n+++ b/haystack/utils/doc_store.py\n@@ -37,7 +37,7 @@\n time.sleep(sleep)\n \n \n-def launch_opensearch(sleep=15, delete_existing=False):\n+def launch_opensearch(sleep=15, delete_existing=False, local_port=9200):\n \"\"\"\n Start an OpenSearch server via Docker.\n \"\"\"\n@@ -48,7 +48,7 @@\n _ = subprocess.run([f\"docker rm --force {OPENSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n- f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'\n+ f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p {local_port}:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'\n ],\n shell=True,\n )\n", "issue": "TCP port in `launch_opensearch()` is different from default value in `OpenSearchDocumentStore`\nIn `launch_opensearch()` we are starting an OpenSearch container using the port `9201`. The default port for `OpenSearchDocumentStore` is currently `9200`. I think we should align those two values.\r\n\n", "before_files": [{"content": "# pylint: disable=missing-timeout\n\nimport time\nimport logging\nimport subprocess\nfrom pathlib import Path\n\nimport requests\n\n\nlogger = logging.getLogger(__name__)\nELASTICSEARCH_CONTAINER_NAME = \"elasticsearch\"\nOPENSEARCH_CONTAINER_NAME = \"opensearch\"\nWEAVIATE_CONTAINER_NAME = \"weaviate\"\n\n\ndef launch_es(sleep=15, delete_existing=False):\n \"\"\"\n Start an Elasticsearch server via Docker.\n \"\"\"\n\n logger.debug(\"Starting Elasticsearch ...\")\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {ELASTICSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {ELASTICSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9200:9200 -e \"discovery.type=single-node\" --name {ELASTICSEARCH_CONTAINER_NAME} elasticsearch:7.9.2'\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start Elasticsearch through Docker but this failed. \"\n \"It is likely that there is already an existing Elasticsearch instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef launch_opensearch(sleep=15, delete_existing=False):\n \"\"\"\n Start an OpenSearch server via Docker.\n \"\"\"\n logger.debug(\"Starting OpenSearch...\")\n # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now\n # docker rm only succeeds if the container is stopped, not if it is running\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {OPENSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start OpenSearch through Docker but this failed. \"\n \"It is likely that there is already an existing OpenSearch instance running. 
\"\n )\n else:\n time.sleep(sleep)\n\n\ndef launch_weaviate(sleep=15):\n \"\"\"\n Start a Weaviate server via Docker.\n \"\"\"\n\n logger.debug(\"Starting Weaviate ...\")\n status = subprocess.run(\n [\n f\"docker start {WEAVIATE_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 8080:8080 --env AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED='true' --env PERSISTENCE_DATA_PATH='/var/lib/weaviate' --name {WEAVIATE_CONTAINER_NAME} semitechnologies/weaviate:latest\"\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start Weaviate through Docker but this failed. \"\n \"It is likely that there is already an existing Weaviate instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef stop_container(container_name, delete_container=False):\n logger.debug(\"Stopping %s...\", container_name)\n status = subprocess.run([f\"docker stop {container_name}\"], shell=True)\n if status.returncode:\n logger.warning(\n f\"Tried to stop {container_name} but this failed. \"\n f\"It is likely that there was no Docker container with the name {container_name}\"\n )\n if delete_container:\n status = subprocess.run([f\"docker rm {container_name}\"], shell=True)\n\n\ndef stop_opensearch(delete_container=False):\n stop_container(OPENSEARCH_CONTAINER_NAME, delete_container)\n\n\ndef stop_elasticsearch(delete_container=False):\n stop_container(ELASTICSEARCH_CONTAINER_NAME, delete_container)\n\n\ndef stop_weaviate(delete_container=False):\n stop_container(WEAVIATE_CONTAINER_NAME, delete_container)\n\n\ndef stop_service(document_store, delete_container=False):\n ds_class = str(type(document_store))\n if \"OpenSearchDocumentStore\" in ds_class:\n stop_opensearch(delete_container)\n elif \"ElasticsearchDocumentStore\" in ds_class:\n stop_elasticsearch(delete_container)\n elif \"WeaviateDocumentStore\" in ds_class:\n stop_weaviate(delete_container)\n else:\n logger.warning(\"No support yet for auto stopping the service behind a %s\", type(document_store))\n\n\ndef launch_milvus(sleep=15, delete_existing=False):\n \"\"\"\n Start a Milvus server via Docker\n \"\"\"\n logger.debug(\"Starting Milvus ...\")\n\n milvus_dir = Path.home() / \"milvus\"\n milvus_dir.mkdir(exist_ok=True)\n\n request = requests.get(\n \"https://github.com/milvus-io/milvus/releases/download/v2.0.0/milvus-standalone-docker-compose.yml\"\n )\n with open(milvus_dir / \"docker-compose.yml\", \"wb\") as f:\n f.write(request.content)\n\n status = subprocess.run([\"cd /home/$USER/milvus/ && docker-compose up -d\"], shell=True)\n\n if status.returncode:\n logger.warning(\n \"Tried to start Milvus through Docker but this failed. \"\n \"It is likely that there is already an existing Milvus instance running. 
\"\n )\n else:\n time.sleep(sleep)\n", "path": "haystack/utils/doc_store.py"}], "after_files": [{"content": "# pylint: disable=missing-timeout\n\nimport time\nimport logging\nimport subprocess\nfrom pathlib import Path\n\nimport requests\n\n\nlogger = logging.getLogger(__name__)\nELASTICSEARCH_CONTAINER_NAME = \"elasticsearch\"\nOPENSEARCH_CONTAINER_NAME = \"opensearch\"\nWEAVIATE_CONTAINER_NAME = \"weaviate\"\n\n\ndef launch_es(sleep=15, delete_existing=False):\n \"\"\"\n Start an Elasticsearch server via Docker.\n \"\"\"\n\n logger.debug(\"Starting Elasticsearch ...\")\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {ELASTICSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {ELASTICSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9200:9200 -e \"discovery.type=single-node\" --name {ELASTICSEARCH_CONTAINER_NAME} elasticsearch:7.9.2'\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start Elasticsearch through Docker but this failed. \"\n \"It is likely that there is already an existing Elasticsearch instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef launch_opensearch(sleep=15, delete_existing=False, local_port=9200):\n \"\"\"\n Start an OpenSearch server via Docker.\n \"\"\"\n logger.debug(\"Starting OpenSearch...\")\n # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now\n # docker rm only succeeds if the container is stopped, not if it is running\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {OPENSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p {local_port}:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.3.5'\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start OpenSearch through Docker but this failed. \"\n \"It is likely that there is already an existing OpenSearch instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef launch_weaviate(sleep=15):\n \"\"\"\n Start a Weaviate server via Docker.\n \"\"\"\n\n logger.debug(\"Starting Weaviate ...\")\n status = subprocess.run(\n [\n f\"docker start {WEAVIATE_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 8080:8080 --env AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED='true' --env PERSISTENCE_DATA_PATH='/var/lib/weaviate' --name {WEAVIATE_CONTAINER_NAME} semitechnologies/weaviate:latest\"\n ],\n shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start Weaviate through Docker but this failed. \"\n \"It is likely that there is already an existing Weaviate instance running. \"\n )\n else:\n time.sleep(sleep)\n\n\ndef stop_container(container_name, delete_container=False):\n logger.debug(\"Stopping %s...\", container_name)\n status = subprocess.run([f\"docker stop {container_name}\"], shell=True)\n if status.returncode:\n logger.warning(\n f\"Tried to stop {container_name} but this failed. 
\"\n f\"It is likely that there was no Docker container with the name {container_name}\"\n )\n if delete_container:\n status = subprocess.run([f\"docker rm {container_name}\"], shell=True)\n\n\ndef stop_opensearch(delete_container=False):\n stop_container(OPENSEARCH_CONTAINER_NAME, delete_container)\n\n\ndef stop_elasticsearch(delete_container=False):\n stop_container(ELASTICSEARCH_CONTAINER_NAME, delete_container)\n\n\ndef stop_weaviate(delete_container=False):\n stop_container(WEAVIATE_CONTAINER_NAME, delete_container)\n\n\ndef stop_service(document_store, delete_container=False):\n ds_class = str(type(document_store))\n if \"OpenSearchDocumentStore\" in ds_class:\n stop_opensearch(delete_container)\n elif \"ElasticsearchDocumentStore\" in ds_class:\n stop_elasticsearch(delete_container)\n elif \"WeaviateDocumentStore\" in ds_class:\n stop_weaviate(delete_container)\n else:\n logger.warning(\"No support yet for auto stopping the service behind a %s\", type(document_store))\n\n\ndef launch_milvus(sleep=15, delete_existing=False):\n \"\"\"\n Start a Milvus server via Docker\n \"\"\"\n logger.debug(\"Starting Milvus ...\")\n\n milvus_dir = Path.home() / \"milvus\"\n milvus_dir.mkdir(exist_ok=True)\n\n request = requests.get(\n \"https://github.com/milvus-io/milvus/releases/download/v2.0.0/milvus-standalone-docker-compose.yml\"\n )\n with open(milvus_dir / \"docker-compose.yml\", \"wb\") as f:\n f.write(request.content)\n\n status = subprocess.run([\"cd /home/$USER/milvus/ && docker-compose up -d\"], shell=True)\n\n if status.returncode:\n logger.warning(\n \"Tried to start Milvus through Docker but this failed. \"\n \"It is likely that there is already an existing Milvus instance running. \"\n )\n else:\n time.sleep(sleep)\n", "path": "haystack/utils/doc_store.py"}]} | 1,914 | 332 |
gh_patches_debug_7446 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-8388 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LatexBuilder for docs fails
### 📚 Describe the documentation issue
The following line makes the docs building crash when using a LatexBuilder
https://github.com/pyg-team/pytorch_geometric/blob/88d7986b6d0a6de5895872270d2ff4fc95fae3b7/docs/source/conf.py#L69C1-L75C43
To reproduce build the docs with the latex builder
```bash
python -m sphinx -T -E -b latex -d _build/doctrees -D language=en . ./build
```
```bash
Extension error:
Handler <function setup.<locals>.rst_jinja_render at 0x1230b4dc0> for event 'source-read' threw an exception (exception: 'LaTeXBuilder' object has no attribute 'templates')
```
### Suggest a potential alternative/fix
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 import datetime
2 import os.path as osp
3 import sys
4
5 import pyg_sphinx_theme
6
7 import torch_geometric
8
9 author = 'PyG Team'
10 project = 'pytorch_geometric'
11 version = torch_geometric.__version__
12 copyright = f'{datetime.datetime.now().year}, {author}'
13
14 sys.path.append(osp.join(osp.dirname(pyg_sphinx_theme.__file__), 'extension'))
15
16 extensions = [
17 'sphinx.ext.autodoc',
18 'sphinx.ext.autosummary',
19 'sphinx.ext.intersphinx',
20 'sphinx.ext.mathjax',
21 'sphinx.ext.napoleon',
22 'sphinx.ext.viewcode',
23 'nbsphinx',
24 'pyg',
25 ]
26
27 html_theme = 'pyg_sphinx_theme'
28 html_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'
29 'master/pyg_sphinx_theme/static/img/pyg_logo.png')
30 html_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'
31 'master/pyg_sphinx_theme/static/img/favicon.png')
32 html_static_path = ['_static']
33 templates_path = ['_templates']
34
35 add_module_names = False
36 autodoc_member_order = 'bysource'
37
38 suppress_warnings = ['autodoc.import_object']
39
40 intersphinx_mapping = {
41 'python': ('https://docs.python.org/', None),
42 # 'numpy': ('http://docs.scipy.org/doc/numpy', None),
43 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None),
44 'torch': ('https://pytorch.org/docs/master', None),
45 }
46
47 nbsphinx_thumbnails = {
48 'tutorial/create_gnn':
49 '_static/thumbnails/create_gnn.png',
50 'tutorial/heterogeneous':
51 '_static/thumbnails/heterogeneous.png',
52 'tutorial/create_dataset':
53 '_static/thumbnails/create_dataset.png',
54 'tutorial/load_csv':
55 '_static/thumbnails/load_csv.png',
56 'tutorial/neighbor_loader':
57 '_static/thumbnails/neighbor_loader.png',
58 'tutorial/explain':
59 '_static/thumbnails/explain.png',
60 'tutorial/shallow_node_embeddings':
61 '_static/thumbnails/shallow_node_embeddings.png',
62 'tutorial/multi_gpu_vanilla':
63 '_static/thumbnails/multi_gpu_vanilla.png',
64 'tutorial/multi_node_multi_gpu_vanilla':
65 '_static/thumbnails/multi_gpu_vanilla.png',
66 }
67
68
69 def setup(app):
70 def rst_jinja_render(app, _, source):
71 rst_context = {'torch_geometric': torch_geometric}
72 source[0] = app.builder.templates.render_string(source[0], rst_context)
73
74 app.connect('source-read', rst_jinja_render)
75 app.add_js_file('js/version_alert.js')
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -66,10 +66,12 @@
}
-def setup(app):
- def rst_jinja_render(app, _, source):
+def rst_jinja_render(app, _, source):
+ if hasattr(app.builder, 'templates'):
rst_context = {'torch_geometric': torch_geometric}
source[0] = app.builder.templates.render_string(source[0], rst_context)
+
+def setup(app):
app.connect('source-read', rst_jinja_render)
app.add_js_file('js/version_alert.js')
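Editor's note: the guard added above simply skips the Jinja rendering step for builders that expose no `templates` attribute. The stub below isolates that behaviour outside Sphinx; the builder classes are stand-ins for illustration, not real Sphinx objects.

```python
# Sketch only: LaTeX-style builders (no `templates` attribute) must be a no-op.
class HTMLBuilderStub:
    class templates:  # stands in for Sphinx's template bridge
        @staticmethod
        def render_string(source, context):
            return source.replace("{{ version }}", context["version"])

class LaTeXBuilderStub:
    pass  # like Sphinx's LaTeXBuilder, exposes no `templates` attribute

def rst_jinja_render(builder, source, context):
    if hasattr(builder, "templates"):
        source[0] = builder.templates.render_string(source[0], context)

doc = ["Version: {{ version }}"]
rst_jinja_render(HTMLBuilderStub, doc, {"version": "2.4.0"})
assert doc == ["Version: 2.4.0"]
rst_jinja_render(LaTeXBuilderStub, doc, {"version": "2.4.0"})  # no crash, no change
```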
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -66,10 +66,12 @@\n }\n \n \n-def setup(app):\n- def rst_jinja_render(app, _, source):\n+def rst_jinja_render(app, _, source):\n+ if hasattr(app.builder, 'templates'):\n rst_context = {'torch_geometric': torch_geometric}\n source[0] = app.builder.templates.render_string(source[0], rst_context)\n \n+\n+def setup(app):\n app.connect('source-read', rst_jinja_render)\n app.add_js_file('js/version_alert.js')\n", "issue": "LatexBuilder for docs fails\n### \ud83d\udcda Describe the documentation issue\r\n\r\nThe following line makes the docs building crash when using a LatexBuilder\r\n\r\nhttps://github.com/pyg-team/pytorch_geometric/blob/88d7986b6d0a6de5895872270d2ff4fc95fae3b7/docs/source/conf.py#L69C1-L75C43\r\n\r\nTo reproduce build the docs with the latex builder\r\n```bash\r\npython -m sphinx -T -E -b latex -d _build/doctrees -D language=en . ./build\r\n```\r\n\r\n```bash\r\nExtension error:\r\nHandler <function setup.<locals>.rst_jinja_render at 0x1230b4dc0> for event 'source-read' threw an exception (exception: 'LaTeXBuilder' object has no attribute 'templates')\r\n\r\n```\r\n\r\n### Suggest a potential alternative/fix\r\n\r\n_No response_\n", "before_files": [{"content": "import datetime\nimport os.path as osp\nimport sys\n\nimport pyg_sphinx_theme\n\nimport torch_geometric\n\nauthor = 'PyG Team'\nproject = 'pytorch_geometric'\nversion = torch_geometric.__version__\ncopyright = f'{datetime.datetime.now().year}, {author}'\n\nsys.path.append(osp.join(osp.dirname(pyg_sphinx_theme.__file__), 'extension'))\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'nbsphinx',\n 'pyg',\n]\n\nhtml_theme = 'pyg_sphinx_theme'\nhtml_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'\n 'master/pyg_sphinx_theme/static/img/pyg_logo.png')\nhtml_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'\n 'master/pyg_sphinx_theme/static/img/favicon.png')\nhtml_static_path = ['_static']\ntemplates_path = ['_templates']\n\nadd_module_names = False\nautodoc_member_order = 'bysource'\n\nsuppress_warnings = ['autodoc.import_object']\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n # 'numpy': ('http://docs.scipy.org/doc/numpy', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None),\n 'torch': ('https://pytorch.org/docs/master', None),\n}\n\nnbsphinx_thumbnails = {\n 'tutorial/create_gnn':\n '_static/thumbnails/create_gnn.png',\n 'tutorial/heterogeneous':\n '_static/thumbnails/heterogeneous.png',\n 'tutorial/create_dataset':\n '_static/thumbnails/create_dataset.png',\n 'tutorial/load_csv':\n '_static/thumbnails/load_csv.png',\n 'tutorial/neighbor_loader':\n '_static/thumbnails/neighbor_loader.png',\n 'tutorial/explain':\n '_static/thumbnails/explain.png',\n 'tutorial/shallow_node_embeddings':\n '_static/thumbnails/shallow_node_embeddings.png',\n 'tutorial/multi_gpu_vanilla':\n '_static/thumbnails/multi_gpu_vanilla.png',\n 'tutorial/multi_node_multi_gpu_vanilla':\n '_static/thumbnails/multi_gpu_vanilla.png',\n}\n\n\ndef setup(app):\n def rst_jinja_render(app, _, source):\n rst_context = {'torch_geometric': torch_geometric}\n source[0] = app.builder.templates.render_string(source[0], rst_context)\n\n app.connect('source-read', rst_jinja_render)\n app.add_js_file('js/version_alert.js')\n", 
"path": "docs/source/conf.py"}], "after_files": [{"content": "import datetime\nimport os.path as osp\nimport sys\n\nimport pyg_sphinx_theme\n\nimport torch_geometric\n\nauthor = 'PyG Team'\nproject = 'pytorch_geometric'\nversion = torch_geometric.__version__\ncopyright = f'{datetime.datetime.now().year}, {author}'\n\nsys.path.append(osp.join(osp.dirname(pyg_sphinx_theme.__file__), 'extension'))\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'nbsphinx',\n 'pyg',\n]\n\nhtml_theme = 'pyg_sphinx_theme'\nhtml_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'\n 'master/pyg_sphinx_theme/static/img/pyg_logo.png')\nhtml_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/'\n 'master/pyg_sphinx_theme/static/img/favicon.png')\nhtml_static_path = ['_static']\ntemplates_path = ['_templates']\n\nadd_module_names = False\nautodoc_member_order = 'bysource'\n\nsuppress_warnings = ['autodoc.import_object']\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n # 'numpy': ('http://docs.scipy.org/doc/numpy', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None),\n 'torch': ('https://pytorch.org/docs/master', None),\n}\n\nnbsphinx_thumbnails = {\n 'tutorial/create_gnn':\n '_static/thumbnails/create_gnn.png',\n 'tutorial/heterogeneous':\n '_static/thumbnails/heterogeneous.png',\n 'tutorial/create_dataset':\n '_static/thumbnails/create_dataset.png',\n 'tutorial/load_csv':\n '_static/thumbnails/load_csv.png',\n 'tutorial/neighbor_loader':\n '_static/thumbnails/neighbor_loader.png',\n 'tutorial/explain':\n '_static/thumbnails/explain.png',\n 'tutorial/shallow_node_embeddings':\n '_static/thumbnails/shallow_node_embeddings.png',\n 'tutorial/multi_gpu_vanilla':\n '_static/thumbnails/multi_gpu_vanilla.png',\n 'tutorial/multi_node_multi_gpu_vanilla':\n '_static/thumbnails/multi_gpu_vanilla.png',\n}\n\n\ndef rst_jinja_render(app, _, source):\n if hasattr(app.builder, 'templates'):\n rst_context = {'torch_geometric': torch_geometric}\n source[0] = app.builder.templates.render_string(source[0], rst_context)\n\n\ndef setup(app):\n app.connect('source-read', rst_jinja_render)\n app.add_js_file('js/version_alert.js')\n", "path": "docs/source/conf.py"}]} | 1,199 | 142 |
gh_patches_debug_23721 | rasdani/github-patches | git_diff | ray-project__ray-11021 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Object spilling] Raylet automatically reloads spilled objects back into object store
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/experimental/object_spilling.py`
Content:
```
1 import ray
2
3
4 def force_spill_objects(object_refs):
5 """Force spilling objects to external storage.
6
7 Args:
8 object_refs: Object refs of the objects to be
9 spilled.
10 """
11 core_worker = ray.worker.global_worker.core_worker
12 # Make sure that the values are object refs.
13 for object_ref in object_refs:
14 if not isinstance(object_ref, ray.ObjectRef):
15 raise TypeError(
16 f"Attempting to call `force_spill_objects` on the "
17 f"value {object_ref}, which is not an ray.ObjectRef.")
18 return core_worker.force_spill_objects(object_refs)
19
20
21 def force_restore_spilled_objects(object_refs):
22 """Force restoring objects from external storage.
23
24 Args:
25 object_refs: Object refs of the objects to be
26 restored.
27 """
28 core_worker = ray.worker.global_worker.core_worker
29 # Make sure that the values are object refs.
30 for object_ref in object_refs:
31 if not isinstance(object_ref, ray.ObjectRef):
32 raise TypeError(
33 f"Attempting to call `force_restore_spilled_objects` on the "
34 f"value {object_ref}, which is not an ray.ObjectRef.")
35 return core_worker.force_restore_spilled_objects(object_refs)
36
```
Path: `python/ray/experimental/__init__.py`
Content:
```
1 from .dynamic_resources import set_resource
2 from .object_spilling import force_spill_objects, force_restore_spilled_objects
3 __all__ = [
4 "set_resource",
5 "force_spill_objects",
6 "force_restore_spilled_objects",
7 ]
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/experimental/__init__.py b/python/ray/experimental/__init__.py
--- a/python/ray/experimental/__init__.py
+++ b/python/ray/experimental/__init__.py
@@ -1,7 +1,6 @@
from .dynamic_resources import set_resource
-from .object_spilling import force_spill_objects, force_restore_spilled_objects
+from .object_spilling import force_spill_objects
__all__ = [
"set_resource",
"force_spill_objects",
- "force_restore_spilled_objects",
]
diff --git a/python/ray/experimental/object_spilling.py b/python/ray/experimental/object_spilling.py
--- a/python/ray/experimental/object_spilling.py
+++ b/python/ray/experimental/object_spilling.py
@@ -16,20 +16,3 @@
f"Attempting to call `force_spill_objects` on the "
f"value {object_ref}, which is not an ray.ObjectRef.")
return core_worker.force_spill_objects(object_refs)
-
-
-def force_restore_spilled_objects(object_refs):
- """Force restoring objects from external storage.
-
- Args:
- object_refs: Object refs of the objects to be
- restored.
- """
- core_worker = ray.worker.global_worker.core_worker
- # Make sure that the values are object refs.
- for object_ref in object_refs:
- if not isinstance(object_ref, ray.ObjectRef):
- raise TypeError(
- f"Attempting to call `force_restore_spilled_objects` on the "
- f"value {object_ref}, which is not an ray.ObjectRef.")
- return core_worker.force_restore_spilled_objects(object_refs)
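Editor's note: after this change only explicit spilling remains in the experimental API; restoring happens automatically when a spilled ref is read. The sketch below is written against the old Ray 1.x experimental API shown in this row and assumes object spilling is enabled in the cluster configuration.

```python
# Sketch only: force-spill some objects, then read them back; the raylet
# restores them on demand, so no force_restore_spilled_objects() call is needed.
import ray
from ray.experimental import force_spill_objects

ray.init()
refs = [ray.put(b"x" * 1024) for _ in range(10)]
force_spill_objects(refs)   # push the objects out to external storage
values = ray.get(refs)      # spilled objects are reloaded automatically
assert all(v == b"x" * 1024 for v in values)
```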
| {"golden_diff": "diff --git a/python/ray/experimental/__init__.py b/python/ray/experimental/__init__.py\n--- a/python/ray/experimental/__init__.py\n+++ b/python/ray/experimental/__init__.py\n@@ -1,7 +1,6 @@\n from .dynamic_resources import set_resource\n-from .object_spilling import force_spill_objects, force_restore_spilled_objects\n+from .object_spilling import force_spill_objects\n __all__ = [\n \"set_resource\",\n \"force_spill_objects\",\n- \"force_restore_spilled_objects\",\n ]\ndiff --git a/python/ray/experimental/object_spilling.py b/python/ray/experimental/object_spilling.py\n--- a/python/ray/experimental/object_spilling.py\n+++ b/python/ray/experimental/object_spilling.py\n@@ -16,20 +16,3 @@\n f\"Attempting to call `force_spill_objects` on the \"\n f\"value {object_ref}, which is not an ray.ObjectRef.\")\n return core_worker.force_spill_objects(object_refs)\n-\n-\n-def force_restore_spilled_objects(object_refs):\n- \"\"\"Force restoring objects from external storage.\n-\n- Args:\n- object_refs: Object refs of the objects to be\n- restored.\n- \"\"\"\n- core_worker = ray.worker.global_worker.core_worker\n- # Make sure that the values are object refs.\n- for object_ref in object_refs:\n- if not isinstance(object_ref, ray.ObjectRef):\n- raise TypeError(\n- f\"Attempting to call `force_restore_spilled_objects` on the \"\n- f\"value {object_ref}, which is not an ray.ObjectRef.\")\n- return core_worker.force_restore_spilled_objects(object_refs)\n", "issue": "[Object spilling] Raylet automatically reloads spilled objects back into object store\n\r\n\n", "before_files": [{"content": "import ray\n\n\ndef force_spill_objects(object_refs):\n \"\"\"Force spilling objects to external storage.\n\n Args:\n object_refs: Object refs of the objects to be\n spilled.\n \"\"\"\n core_worker = ray.worker.global_worker.core_worker\n # Make sure that the values are object refs.\n for object_ref in object_refs:\n if not isinstance(object_ref, ray.ObjectRef):\n raise TypeError(\n f\"Attempting to call `force_spill_objects` on the \"\n f\"value {object_ref}, which is not an ray.ObjectRef.\")\n return core_worker.force_spill_objects(object_refs)\n\n\ndef force_restore_spilled_objects(object_refs):\n \"\"\"Force restoring objects from external storage.\n\n Args:\n object_refs: Object refs of the objects to be\n restored.\n \"\"\"\n core_worker = ray.worker.global_worker.core_worker\n # Make sure that the values are object refs.\n for object_ref in object_refs:\n if not isinstance(object_ref, ray.ObjectRef):\n raise TypeError(\n f\"Attempting to call `force_restore_spilled_objects` on the \"\n f\"value {object_ref}, which is not an ray.ObjectRef.\")\n return core_worker.force_restore_spilled_objects(object_refs)\n", "path": "python/ray/experimental/object_spilling.py"}, {"content": "from .dynamic_resources import set_resource\nfrom .object_spilling import force_spill_objects, force_restore_spilled_objects\n__all__ = [\n \"set_resource\",\n \"force_spill_objects\",\n \"force_restore_spilled_objects\",\n]\n", "path": "python/ray/experimental/__init__.py"}], "after_files": [{"content": "import ray\n\n\ndef force_spill_objects(object_refs):\n \"\"\"Force spilling objects to external storage.\n\n Args:\n object_refs: Object refs of the objects to be\n spilled.\n \"\"\"\n core_worker = ray.worker.global_worker.core_worker\n # Make sure that the values are object refs.\n for object_ref in object_refs:\n if not isinstance(object_ref, ray.ObjectRef):\n raise TypeError(\n f\"Attempting to call `force_spill_objects` 
on the \"\n f\"value {object_ref}, which is not an ray.ObjectRef.\")\n return core_worker.force_spill_objects(object_refs)\n", "path": "python/ray/experimental/object_spilling.py"}, {"content": "from .dynamic_resources import set_resource\nfrom .object_spilling import force_spill_objects\n__all__ = [\n \"set_resource\",\n \"force_spill_objects\",\n]\n", "path": "python/ray/experimental/__init__.py"}]} | 683 | 366 |
gh_patches_debug_24414 | rasdani/github-patches | git_diff | netbox-community__netbox-12192 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Introduce a permission specifically to allow the creation of API tokens for other users
### NetBox version
v3.4.7
### Feature type
Change to existing functionality
### Proposed functionality
This idea was [first proposed](https://github.com/netbox-community/netbox/issues/11091#issuecomment-1382039803) by @kkthxbye-code under #11091. This permission will control whether a specific user has the ability to create API tokens on behalf of other users.
### Use case
Provides more granular control over the creation of API tokens.
### Database changes
_No response_
### External dependencies
_No response_
--- END ISSUE ---
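Editor's note: the request boils down to gating token creation for other users behind a dedicated permission. A hypothetical sketch of that check follows; the permission codename and helper are assumptions for illustration, not the shipped NetBox implementation.

```python
# Hypothetical sketch only: the permission codename "users.grant_token" is assumed.
class StubUser:
    def __init__(self, name, perms=()):
        self.name, self._perms = name, set(perms)

    def has_perm(self, codename):
        return codename in self._perms

def validate_token_owner(request_user, target_user):
    """Allow self-service tokens; require a dedicated permission for other users."""
    if target_user is request_user:
        return
    if not request_user.has_perm("users.grant_token"):
        raise PermissionError("Cannot create API tokens for other users.")

admin = StubUser("admin", perms={"users.grant_token"})
alice = StubUser("alice")
validate_token_owner(alice, alice)   # own token: allowed
validate_token_owner(admin, alice)   # other user, has permission: allowed
try:
    validate_token_owner(alice, admin)   # other user, no permission: rejected
except PermissionError as exc:
    print(exc)
```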
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/users/api/serializers.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.auth.models import Group, User
3 from django.contrib.contenttypes.models import ContentType
4 from rest_framework import serializers
5
6 from netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField
7 from netbox.api.serializers import ValidatedModelSerializer
8 from users.models import ObjectPermission, Token
9 from .nested_serializers import *
10
11
12 __all__ = (
13 'GroupSerializer',
14 'ObjectPermissionSerializer',
15 'TokenSerializer',
16 'UserSerializer',
17 )
18
19
20 class UserSerializer(ValidatedModelSerializer):
21 url = serializers.HyperlinkedIdentityField(view_name='users-api:user-detail')
22 groups = SerializedPKRelatedField(
23 queryset=Group.objects.all(),
24 serializer=NestedGroupSerializer,
25 required=False,
26 many=True
27 )
28
29 class Meta:
30 model = User
31 fields = (
32 'id', 'url', 'display', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', 'is_active',
33 'date_joined', 'groups',
34 )
35 extra_kwargs = {
36 'password': {'write_only': True}
37 }
38
39 def create(self, validated_data):
40 """
41 Extract the password from validated data and set it separately to ensure proper hash generation.
42 """
43 password = validated_data.pop('password')
44 user = super().create(validated_data)
45 user.set_password(password)
46 user.save()
47
48 return user
49
50 def get_display(self, obj):
51 if full_name := obj.get_full_name():
52 return f"{obj.username} ({full_name})"
53 return obj.username
54
55
56 class GroupSerializer(ValidatedModelSerializer):
57 url = serializers.HyperlinkedIdentityField(view_name='users-api:group-detail')
58 user_count = serializers.IntegerField(read_only=True)
59
60 class Meta:
61 model = Group
62 fields = ('id', 'url', 'display', 'name', 'user_count')
63
64
65 class TokenSerializer(ValidatedModelSerializer):
66 url = serializers.HyperlinkedIdentityField(view_name='users-api:token-detail')
67 key = serializers.CharField(
68 min_length=40,
69 max_length=40,
70 allow_blank=True,
71 required=False,
72 write_only=not settings.ALLOW_TOKEN_RETRIEVAL
73 )
74 user = NestedUserSerializer()
75 allowed_ips = serializers.ListField(
76 child=IPNetworkSerializer(),
77 required=False,
78 allow_empty=True,
79 default=[]
80 )
81
82 class Meta:
83 model = Token
84 fields = (
85 'id', 'url', 'display', 'user', 'created', 'expires', 'last_used', 'key', 'write_enabled', 'description',
86 'allowed_ips',
87 )
88
89 def to_internal_value(self, data):
90 if 'key' not in data:
91 data['key'] = Token.generate_key()
92 return super().to_internal_value(data)
93
94
95 class TokenProvisionSerializer(serializers.Serializer):
96 username = serializers.CharField()
97 password = serializers.CharField()
98
99
100 class ObjectPermissionSerializer(ValidatedModelSerializer):
101 url = serializers.HyperlinkedIdentityField(view_name='users-api:objectpermission-detail')
102 object_types = ContentTypeField(
103 queryset=ContentType.objects.all(),
104 many=True
105 )
106 groups = SerializedPKRelatedField(
107 queryset=Group.objects.all(),
108 serializer=NestedGroupSerializer,
109 required=False,
110 many=True
111 )
112 users = SerializedPKRelatedField(
113 queryset=User.objects.all(),
114 serializer=NestedUserSerializer,
115 required=False,
116 many=True
117 )
118
119 class Meta:
120 model = ObjectPermission
121 fields = (
122 'id', 'url', 'display', 'name', 'description', 'enabled', 'object_types', 'groups', 'users', 'actions',
123 'constraints',
124 )
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/users/api/serializers.py b/netbox/users/api/serializers.py
--- a/netbox/users/api/serializers.py
+++ b/netbox/users/api/serializers.py
@@ -2,6 +2,7 @@
from django.contrib.auth.models import Group, User
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
+from rest_framework.exceptions import PermissionDenied
from netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField
from netbox.api.serializers import ValidatedModelSerializer
@@ -91,6 +92,16 @@
data['key'] = Token.generate_key()
return super().to_internal_value(data)
+ def validate(self, data):
+
+ # If the Token is being created on behalf of another user, enforce the grant_token permission.
+ request = self.context.get('request')
+ token_user = data.get('user')
+ if token_user and token_user != request.user and not request.user.has_perm('users.grant_token'):
+ raise PermissionDenied("This user does not have permission to create tokens for other users.")
+
+ return super().validate(data)
+
class TokenProvisionSerializer(serializers.Serializer):
username = serializers.CharField()
| {"golden_diff": "diff --git a/netbox/users/api/serializers.py b/netbox/users/api/serializers.py\n--- a/netbox/users/api/serializers.py\n+++ b/netbox/users/api/serializers.py\n@@ -2,6 +2,7 @@\n from django.contrib.auth.models import Group, User\n from django.contrib.contenttypes.models import ContentType\n from rest_framework import serializers\n+from rest_framework.exceptions import PermissionDenied\n \n from netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField\n from netbox.api.serializers import ValidatedModelSerializer\n@@ -91,6 +92,16 @@\n data['key'] = Token.generate_key()\n return super().to_internal_value(data)\n \n+ def validate(self, data):\n+\n+ # If the Token is being created on behalf of another user, enforce the grant_token permission.\n+ request = self.context.get('request')\n+ token_user = data.get('user')\n+ if token_user and token_user != request.user and not request.user.has_perm('users.grant_token'):\n+ raise PermissionDenied(\"This user does not have permission to create tokens for other users.\")\n+\n+ return super().validate(data)\n+\n \n class TokenProvisionSerializer(serializers.Serializer):\n username = serializers.CharField()\n", "issue": "Introduce a permission specifically to allow the creation of API tokens for other users\n### NetBox version\n\nv3.4.7\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nThis idea was [first proposed](https://github.com/netbox-community/netbox/issues/11091#issuecomment-1382039803) by @kkthxbye-code under #11091. This permission will control whether a specific user has the ability to create API tokens on behalf of other users.\n\n### Use case\n\nProvides more granular control over the creation of API tokens.\n\n### Database changes\n\n_No response_\n\n### External dependencies\n\n_No response_\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework import serializers\n\nfrom netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField\nfrom netbox.api.serializers import ValidatedModelSerializer\nfrom users.models import ObjectPermission, Token\nfrom .nested_serializers import *\n\n\n__all__ = (\n 'GroupSerializer',\n 'ObjectPermissionSerializer',\n 'TokenSerializer',\n 'UserSerializer',\n)\n\n\nclass UserSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:user-detail')\n groups = SerializedPKRelatedField(\n queryset=Group.objects.all(),\n serializer=NestedGroupSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = User\n fields = (\n 'id', 'url', 'display', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', 'is_active',\n 'date_joined', 'groups',\n )\n extra_kwargs = {\n 'password': {'write_only': True}\n }\n\n def create(self, validated_data):\n \"\"\"\n Extract the password from validated data and set it separately to ensure proper hash generation.\n \"\"\"\n password = validated_data.pop('password')\n user = super().create(validated_data)\n user.set_password(password)\n user.save()\n\n return user\n\n def get_display(self, obj):\n if full_name := obj.get_full_name():\n return f\"{obj.username} ({full_name})\"\n return obj.username\n\n\nclass GroupSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:group-detail')\n user_count = 
serializers.IntegerField(read_only=True)\n\n class Meta:\n model = Group\n fields = ('id', 'url', 'display', 'name', 'user_count')\n\n\nclass TokenSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:token-detail')\n key = serializers.CharField(\n min_length=40,\n max_length=40,\n allow_blank=True,\n required=False,\n write_only=not settings.ALLOW_TOKEN_RETRIEVAL\n )\n user = NestedUserSerializer()\n allowed_ips = serializers.ListField(\n child=IPNetworkSerializer(),\n required=False,\n allow_empty=True,\n default=[]\n )\n\n class Meta:\n model = Token\n fields = (\n 'id', 'url', 'display', 'user', 'created', 'expires', 'last_used', 'key', 'write_enabled', 'description',\n 'allowed_ips',\n )\n\n def to_internal_value(self, data):\n if 'key' not in data:\n data['key'] = Token.generate_key()\n return super().to_internal_value(data)\n\n\nclass TokenProvisionSerializer(serializers.Serializer):\n username = serializers.CharField()\n password = serializers.CharField()\n\n\nclass ObjectPermissionSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:objectpermission-detail')\n object_types = ContentTypeField(\n queryset=ContentType.objects.all(),\n many=True\n )\n groups = SerializedPKRelatedField(\n queryset=Group.objects.all(),\n serializer=NestedGroupSerializer,\n required=False,\n many=True\n )\n users = SerializedPKRelatedField(\n queryset=User.objects.all(),\n serializer=NestedUserSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = ObjectPermission\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'enabled', 'object_types', 'groups', 'users', 'actions',\n 'constraints',\n )\n", "path": "netbox/users/api/serializers.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.contenttypes.models import ContentType\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import PermissionDenied\n\nfrom netbox.api.fields import ContentTypeField, IPNetworkSerializer, SerializedPKRelatedField\nfrom netbox.api.serializers import ValidatedModelSerializer\nfrom users.models import ObjectPermission, Token\nfrom .nested_serializers import *\n\n\n__all__ = (\n 'GroupSerializer',\n 'ObjectPermissionSerializer',\n 'TokenSerializer',\n 'UserSerializer',\n)\n\n\nclass UserSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:user-detail')\n groups = SerializedPKRelatedField(\n queryset=Group.objects.all(),\n serializer=NestedGroupSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = User\n fields = (\n 'id', 'url', 'display', 'username', 'password', 'first_name', 'last_name', 'email', 'is_staff', 'is_active',\n 'date_joined', 'groups',\n )\n extra_kwargs = {\n 'password': {'write_only': True}\n }\n\n def create(self, validated_data):\n \"\"\"\n Extract the password from validated data and set it separately to ensure proper hash generation.\n \"\"\"\n password = validated_data.pop('password')\n user = super().create(validated_data)\n user.set_password(password)\n user.save()\n\n return user\n\n def get_display(self, obj):\n if full_name := obj.get_full_name():\n return f\"{obj.username} ({full_name})\"\n return obj.username\n\n\nclass GroupSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:group-detail')\n user_count = serializers.IntegerField(read_only=True)\n\n 
class Meta:\n model = Group\n fields = ('id', 'url', 'display', 'name', 'user_count')\n\n\nclass TokenSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:token-detail')\n key = serializers.CharField(\n min_length=40,\n max_length=40,\n allow_blank=True,\n required=False,\n write_only=not settings.ALLOW_TOKEN_RETRIEVAL\n )\n user = NestedUserSerializer()\n allowed_ips = serializers.ListField(\n child=IPNetworkSerializer(),\n required=False,\n allow_empty=True,\n default=[]\n )\n\n class Meta:\n model = Token\n fields = (\n 'id', 'url', 'display', 'user', 'created', 'expires', 'last_used', 'key', 'write_enabled', 'description',\n 'allowed_ips',\n )\n\n def to_internal_value(self, data):\n if 'key' not in data:\n data['key'] = Token.generate_key()\n return super().to_internal_value(data)\n\n def validate(self, data):\n\n # If the Token is being created on behalf of another user, enforce the grant_token permission.\n request = self.context.get('request')\n token_user = data.get('user')\n if token_user and token_user != request.user and not request.user.has_perm('users.grant_token'):\n raise PermissionDenied(\"This user does not have permission to create tokens for other users.\")\n\n return super().validate(data)\n\n\nclass TokenProvisionSerializer(serializers.Serializer):\n username = serializers.CharField()\n password = serializers.CharField()\n\n\nclass ObjectPermissionSerializer(ValidatedModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='users-api:objectpermission-detail')\n object_types = ContentTypeField(\n queryset=ContentType.objects.all(),\n many=True\n )\n groups = SerializedPKRelatedField(\n queryset=Group.objects.all(),\n serializer=NestedGroupSerializer,\n required=False,\n many=True\n )\n users = SerializedPKRelatedField(\n queryset=User.objects.all(),\n serializer=NestedUserSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = ObjectPermission\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'enabled', 'object_types', 'groups', 'users', 'actions',\n 'constraints',\n )\n", "path": "netbox/users/api/serializers.py"}]} | 1,476 | 268 |
gh_patches_debug_19276 | rasdani/github-patches | git_diff | spack__spack-5135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot build elfutils
With the current head (b5eb298f3efde1ae32545a3363bed46e1811ab76)
```
$ spack install elfutils
==> Installing elfutils
==> Using cached archive: ~/Documents/git/spack/var/spack/cache/elfutils/elfutils-0.163.tar.bz2
==> Already staged elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3 in ~/Documents/git/spack/var/spack/stage/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3
==> Already patched elfutils
==> Building elfutils [AutotoolsPackage]
==> Executing phase : 'autoreconf'
==> Executing phase : 'configure'
==> Error: ProcessError: Command exited with status 1:
'~/Documents/git/spack/var/spack/stage/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3/elfutils-0.163/configure' '--prefix=~/Documents/git/spack/opt/spack/linux-debian8-x86_64/gcc-4.9.2/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3' '--enable-maintainer-mode'
~/Documents/git/spack/lib/spack/spack/build_systems/autotools.py:266, in configure:
258 def configure(self, spec, prefix):
259 """Runs configure with the arguments specified in
260 :py:meth:`~.AutotoolsPackage.configure_args`
261 and an appropriately set prefix.
262 """
263 options = ['--prefix={0}'.format(prefix)] + self.configure_args()
264
265 with working_dir(self.build_directory, create=True):
>> 266 inspect.getmodule(self).configure(*options)
See build log for details:
~/spack-stage/spack-stage-9_hjUA/elfutils-0.163/spack-build.out
```
```
$ tail ~/spack-stage/spack-stage-9_hjUA/elfutils-0.163/spack-build.out
checking for inttypes.h... yes
checking for stdint.h... yes
checking for unistd.h... yes
checking size of long... 8
checking for struct user_regs_struct... yes
checking ~/Documents/git/spack/lib/spack/env/gcc/gcc option for 32-bit word size... -m32
checking for 64-bit host... yes
checking whether ~/Documents/git/spack/lib/spack/env/gcc/gcc -m32 makes executables we can run... yes
checking for flex... no
configure: error: flex needed in maintainer mode
```
Adding ```depends_on('flex')``` leads to
```
configure: error: bison needed in maintainer mode
```
Is this a know issue? How do I fix this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/elfutils/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Elfutils(AutotoolsPackage):
29 """elfutils is a collection of various binary tools such as
30 eu-objdump, eu-readelf, and other utilities that allow you to
31 inspect and manipulate ELF files. Refer to Table 5.Tools Included
32 in elfutils for Red Hat Developer for a complete list of binary
33 tools that are distributed with the Red Hat Developer Toolset
34 version of elfutils."""
35
36 homepage = "https://fedorahosted.org/elfutils/"
37
38 url = "https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2"
39 list_url = "https://sourceware.org/elfutils/ftp"
40 list_depth = 1
41
42 version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')
43 version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)
44
45 provides('elf@1')
46
47 def configure_args(self):
48 return ['--enable-maintainer-mode']
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/elfutils/package.py b/var/spack/repos/builtin/packages/elfutils/package.py
--- a/var/spack/repos/builtin/packages/elfutils/package.py
+++ b/var/spack/repos/builtin/packages/elfutils/package.py
@@ -35,14 +35,22 @@
homepage = "https://fedorahosted.org/elfutils/"
- url = "https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2"
+ url = "https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2"
list_url = "https://sourceware.org/elfutils/ftp"
list_depth = 1
+ version('0.170', '03599aee98c9b726c7a732a2dd0245d5')
version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')
version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)
+ depends_on('flex', type='build')
+ depends_on('bison', type='build')
+ depends_on('gettext')
+
provides('elf@1')
def configure_args(self):
- return ['--enable-maintainer-mode']
+ # configure doesn't use LIBS correctly
+ return [
+ 'LDFLAGS=-L%s -lintl' % self.spec['gettext'].prefix.lib,
+ '--enable-maintainer-mode']
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/elfutils/package.py b/var/spack/repos/builtin/packages/elfutils/package.py\n--- a/var/spack/repos/builtin/packages/elfutils/package.py\n+++ b/var/spack/repos/builtin/packages/elfutils/package.py\n@@ -35,14 +35,22 @@\n \n homepage = \"https://fedorahosted.org/elfutils/\"\n \n- url = \"https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2\"\n+ url = \"https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2\"\n list_url = \"https://sourceware.org/elfutils/ftp\"\n list_depth = 1\n \n+ version('0.170', '03599aee98c9b726c7a732a2dd0245d5')\n version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')\n version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)\n \n+ depends_on('flex', type='build')\n+ depends_on('bison', type='build')\n+ depends_on('gettext')\n+\n provides('elf@1')\n \n def configure_args(self):\n- return ['--enable-maintainer-mode']\n+ # configure doesn't use LIBS correctly\n+ return [\n+ 'LDFLAGS=-L%s -lintl' % self.spec['gettext'].prefix.lib,\n+ '--enable-maintainer-mode']\n", "issue": "Cannot build elfutils\nWith the current head (b5eb298f3efde1ae32545a3363bed46e1811ab76) \r\n\r\n```\r\n$ spack install elfutils\r\n==> Installing elfutils\r\n==> Using cached archive: ~/Documents/git/spack/var/spack/cache/elfutils/elfutils-0.163.tar.bz2\r\n==> Already staged elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3 in ~/Documents/git/spack/var/spack/stage/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3\r\n==> Already patched elfutils\r\n==> Building elfutils [AutotoolsPackage]\r\n==> Executing phase : 'autoreconf'\r\n==> Executing phase : 'configure'\r\n==> Error: ProcessError: Command exited with status 1:\r\n '~/Documents/git/spack/var/spack/stage/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3/elfutils-0.163/configure' '--prefix=~/Documents/git/spack/opt/spack/linux-debian8-x86_64/gcc-4.9.2/elfutils-0.163-72pp6vadezugf5nmy5gbqksrmpukksa3' '--enable-maintainer-mode'\r\n~/Documents/git/spack/lib/spack/spack/build_systems/autotools.py:266, in configure:\r\n 258 def configure(self, spec, prefix):\r\n 259 \"\"\"Runs configure with the arguments specified in\r\n 260 :py:meth:`~.AutotoolsPackage.configure_args`\r\n 261 and an appropriately set prefix.\r\n 262 \"\"\"\r\n 263 options = ['--prefix={0}'.format(prefix)] + self.configure_args()\r\n 264 \r\n 265 with working_dir(self.build_directory, create=True):\r\n >> 266 inspect.getmodule(self).configure(*options)\r\n\r\nSee build log for details:\r\n ~/spack-stage/spack-stage-9_hjUA/elfutils-0.163/spack-build.out\r\n```\r\n```\r\n$ tail ~/spack-stage/spack-stage-9_hjUA/elfutils-0.163/spack-build.out\r\nchecking for inttypes.h... yes\r\nchecking for stdint.h... yes\r\nchecking for unistd.h... yes\r\nchecking size of long... 8\r\nchecking for struct user_regs_struct... yes\r\nchecking ~/Documents/git/spack/lib/spack/env/gcc/gcc option for 32-bit word size... -m32\r\nchecking for 64-bit host... yes\r\nchecking whether ~/Documents/git/spack/lib/spack/env/gcc/gcc -m32 makes executables we can run... yes\r\nchecking for flex... no\r\nconfigure: error: flex needed in maintainer mode\r\n```\r\nAdding ```depends_on('flex')``` leads to \r\n```\r\nconfigure: error: bison needed in maintainer mode\r\n```\r\n\r\nIs this a know issue? 
How do I fix this?\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Elfutils(AutotoolsPackage):\n \"\"\"elfutils is a collection of various binary tools such as\n eu-objdump, eu-readelf, and other utilities that allow you to\n inspect and manipulate ELF files. Refer to Table 5.Tools Included\n in elfutils for Red Hat Developer for a complete list of binary\n tools that are distributed with the Red Hat Developer Toolset\n version of elfutils.\"\"\"\n\n homepage = \"https://fedorahosted.org/elfutils/\"\n\n url = \"https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2\"\n list_url = \"https://sourceware.org/elfutils/ftp\"\n list_depth = 1\n\n version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')\n version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)\n\n provides('elf@1')\n\n def configure_args(self):\n return ['--enable-maintainer-mode']\n", "path": "var/spack/repos/builtin/packages/elfutils/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Elfutils(AutotoolsPackage):\n \"\"\"elfutils is a collection of various binary tools such as\n eu-objdump, eu-readelf, and other utilities that allow you to\n inspect and manipulate ELF files. Refer to Table 5.Tools Included\n in elfutils for Red Hat Developer for a complete list of binary\n tools that are distributed with the Red Hat Developer Toolset\n version of elfutils.\"\"\"\n\n homepage = \"https://fedorahosted.org/elfutils/\"\n\n url = \"https://sourceware.org/elfutils/ftp/0.168/elfutils-0.168.tar.bz2\"\n list_url = \"https://sourceware.org/elfutils/ftp\"\n list_depth = 1\n\n version('0.170', '03599aee98c9b726c7a732a2dd0245d5')\n version('0.168', '52adfa40758d0d39e5d5c57689bf38d6')\n version('0.163', '77ce87f259987d2e54e4d87b86cbee41', preferred=True)\n\n depends_on('flex', type='build')\n depends_on('bison', type='build')\n depends_on('gettext')\n\n provides('elf@1')\n\n def configure_args(self):\n # configure doesn't use LIBS correctly\n return [\n 'LDFLAGS=-L%s -lintl' % self.spec['gettext'].prefix.lib,\n '--enable-maintainer-mode']\n", "path": "var/spack/repos/builtin/packages/elfutils/package.py"}]} | 1,635 | 413 |
gh_patches_debug_11891 | rasdani/github-patches | git_diff | saleor__saleor-2368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot place order in saleor demo's storefront
### What I'm trying to achieve
Place an order on the demo store to reproduce another bug. :wink:
### Steps to reproduce the problem
1. Create a cart with an item;
2. Follow the checkout until the summary page;
3. Try to hit "Order & Pay";
4. A server error should occur.
**System information**
```
Host: demo.getsaleor.com
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0
Accept-Language: en,en-GB;q=0.8,en-US
Accept-Encoding: gzip, deflate, br
Referer: https://demo.getsaleor.com/en/checkout/summary/
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/account/utils.py`
Content:
```
1 from ..checkout import AddressType
2 from ..core.demo_obfuscators import obfuscate_address
3
4
5 def store_user_address(user, address, address_type):
6 """Add address to user address book and set as default one."""
7 address, _ = user.addresses.get_or_create(**address.as_data())
8
9 # DEMO: obfuscate user address
10 address = obfuscate_address(address)
11
12 if address_type == AddressType.BILLING:
13 if not user.default_billing_address:
14 user.default_billing_address = address
15 user.save(update_fields=['default_billing_address'])
16 elif address_type == AddressType.SHIPPING:
17 if not user.default_shipping_address:
18 user.default_shipping_address = address
19 user.save(update_fields=['default_shipping_address'])
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/account/utils.py b/saleor/account/utils.py
--- a/saleor/account/utils.py
+++ b/saleor/account/utils.py
@@ -4,11 +4,11 @@
def store_user_address(user, address, address_type):
"""Add address to user address book and set as default one."""
- address, _ = user.addresses.get_or_create(**address.as_data())
-
# DEMO: obfuscate user address
address = obfuscate_address(address)
+ address, _ = user.addresses.get_or_create(**address.as_data())
+
if address_type == AddressType.BILLING:
if not user.default_billing_address:
user.default_billing_address = address
| {"golden_diff": "diff --git a/saleor/account/utils.py b/saleor/account/utils.py\n--- a/saleor/account/utils.py\n+++ b/saleor/account/utils.py\n@@ -4,11 +4,11 @@\n \n def store_user_address(user, address, address_type):\n \"\"\"Add address to user address book and set as default one.\"\"\"\n- address, _ = user.addresses.get_or_create(**address.as_data())\n-\n # DEMO: obfuscate user address\n address = obfuscate_address(address)\n \n+ address, _ = user.addresses.get_or_create(**address.as_data())\n+\n if address_type == AddressType.BILLING:\n if not user.default_billing_address:\n user.default_billing_address = address\n", "issue": "Cannot place order in saleor demo's storefront\n### What I'm trying to achieve\r\nPlace an order on the demo store to reproduce another bug. :wink:\r\n\r\n### Steps to reproduce the problem\r\n1. Create a cart with an item;\r\n2. Follow the checkout until the summary page;\r\n3. Try to hit \"Order & Pay\";\r\n4. A server error should occur.\r\n\r\n**System information**\r\n```\r\nHost: demo.getsaleor.com\r\nUser-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0\r\nAccept-Language: en,en-GB;q=0.8,en-US\r\nAccept-Encoding: gzip, deflate, br\r\nReferer: https://demo.getsaleor.com/en/checkout/summary/\r\n```\n", "before_files": [{"content": "from ..checkout import AddressType\nfrom ..core.demo_obfuscators import obfuscate_address\n\n\ndef store_user_address(user, address, address_type):\n \"\"\"Add address to user address book and set as default one.\"\"\"\n address, _ = user.addresses.get_or_create(**address.as_data())\n\n # DEMO: obfuscate user address\n address = obfuscate_address(address)\n\n if address_type == AddressType.BILLING:\n if not user.default_billing_address:\n user.default_billing_address = address\n user.save(update_fields=['default_billing_address'])\n elif address_type == AddressType.SHIPPING:\n if not user.default_shipping_address:\n user.default_shipping_address = address\n user.save(update_fields=['default_shipping_address'])\n", "path": "saleor/account/utils.py"}], "after_files": [{"content": "from ..checkout import AddressType\nfrom ..core.demo_obfuscators import obfuscate_address\n\n\ndef store_user_address(user, address, address_type):\n \"\"\"Add address to user address book and set as default one.\"\"\"\n # DEMO: obfuscate user address\n address = obfuscate_address(address)\n\n address, _ = user.addresses.get_or_create(**address.as_data())\n\n if address_type == AddressType.BILLING:\n if not user.default_billing_address:\n user.default_billing_address = address\n user.save(update_fields=['default_billing_address'])\n elif address_type == AddressType.SHIPPING:\n if not user.default_shipping_address:\n user.default_shipping_address = address\n user.save(update_fields=['default_shipping_address'])\n", "path": "saleor/account/utils.py"}]} | 627 | 158 |
gh_patches_debug_56935 | rasdani/github-patches | git_diff | quantopian__zipline-1707 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
latest tutorial.ipynb has non working examples
Dear Zipline Maintainers,
Before I tell you about my issue, let me describe my environment:
# Environment
- Operating System: (MAC OS X El Capitan`)
- Python Version: `$ python --3.4`
- Python Bitness: `$ python -c 'import math, sys;print(int(math.log(sys.maxsize + 1, 2) + 1))'`
- How did you install Zipline: (`pip`)
- Python packages: `$ pip freeze` or `$ conda list`
Now that you know a little about me, let me tell you about the issue I am
having
# Description of Issue
While going through the latest tutorial.ipynb it throws an error:
TypeError: a float is required
- What did you expect to happen?
I ran the notebook and expected to see the same results as in your notebook
- What happened instead?
An error:
TypeError: a float is required
Here is how you can reproduce this issue on your machine:
## Reproduction Steps
1.Run the last cell in the tutorial
...
## What steps have you taken to resolve this already?
I was trying to identify where the errors belongs to by commenting the lines of code. I'm a beginner , so I don't know how to solve it yet. It seems like the error is thrown when accessing the line:
short_mavg = history(100, '1d', 'price').mean()
...
# Anything else?
...
Sincerely,
`$ whoami`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zipline/examples/buyapple.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # Copyright 2014 Quantopian, Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from zipline.api import order, record, symbol
18
19
20 def initialize(context):
21 pass
22
23
24 def handle_data(context, data):
25 order(symbol('AAPL'), 10)
26 record(AAPL=data.current(symbol('AAPL'), 'price'))
27
28
29 # Note: this function can be removed if running
30 # this algorithm on quantopian.com
31 def analyze(context=None, results=None):
32 import matplotlib.pyplot as plt
33 # Plot the portfolio and asset data.
34 ax1 = plt.subplot(211)
35 results.portfolio_value.plot(ax=ax1)
36 ax1.set_ylabel('Portfolio value (USD)')
37 ax2 = plt.subplot(212, sharex=ax1)
38 results.AAPL.plot(ax=ax2)
39 ax2.set_ylabel('AAPL price (USD)')
40
41 # Show the plot.
42 plt.gcf().set_size_inches(18, 8)
43 plt.show()
44
45
46 def _test_args():
47 """Extra arguments to use when zipline's automated tests run this example.
48 """
49 import pandas as pd
50
51 return {
52 'start': pd.Timestamp('2014-01-01', tz='utc'),
53 'end': pd.Timestamp('2014-11-01', tz='utc'),
54 }
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zipline/examples/buyapple.py b/zipline/examples/buyapple.py
--- a/zipline/examples/buyapple.py
+++ b/zipline/examples/buyapple.py
@@ -18,12 +18,12 @@
def initialize(context):
- pass
+ context.asset = symbol('AAPL')
def handle_data(context, data):
- order(symbol('AAPL'), 10)
- record(AAPL=data.current(symbol('AAPL'), 'price'))
+ order(context.asset, 10)
+ record(AAPL=data.current(context.asset, 'price'))
# Note: this function can be removed if running
| {"golden_diff": "diff --git a/zipline/examples/buyapple.py b/zipline/examples/buyapple.py\n--- a/zipline/examples/buyapple.py\n+++ b/zipline/examples/buyapple.py\n@@ -18,12 +18,12 @@\n \n \n def initialize(context):\n- pass\n+ context.asset = symbol('AAPL')\n \n \n def handle_data(context, data):\n- order(symbol('AAPL'), 10)\n- record(AAPL=data.current(symbol('AAPL'), 'price'))\n+ order(context.asset, 10)\n+ record(AAPL=data.current(context.asset, 'price'))\n \n \n # Note: this function can be removed if running\n", "issue": "latest tutorial.ipynb has non working examples \nDear Zipline Maintainers,\n\nBefore I tell you about my issue, let me describe my environment:\n# Environment\n- Operating System: (MAC OS X El Capitan`)\n- Python Version: `$ python --3.4`\n- Python Bitness: `$ python -c 'import math, sys;print(int(math.log(sys.maxsize + 1, 2) + 1))'`\n- How did you install Zipline: (`pip`)\n- Python packages: `$ pip freeze` or `$ conda list`\n\nNow that you know a little about me, let me tell you about the issue I am\nhaving\n# Description of Issue\n\nWhile going through the latest tutorial.ipynb it throws an error:\nTypeError: a float is required\n- What did you expect to happen?\n I ran the notebook and expected to see the same results as in your notebook\n- What happened instead?\n An error:\n TypeError: a float is required\n\nHere is how you can reproduce this issue on your machine:\n## Reproduction Steps\n\n1.Run the last cell in the tutorial\n\n...\n## What steps have you taken to resolve this already?\n\nI was trying to identify where the errors belongs to by commenting the lines of code. I'm a beginner , so I don't know how to solve it yet. It seems like the error is thrown when accessing the line:\nshort_mavg = history(100, '1d', 'price').mean()\n...\n# Anything else?\n\n...\n\nSincerely,\n`$ whoami`\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2014 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom zipline.api import order, record, symbol\n\n\ndef initialize(context):\n pass\n\n\ndef handle_data(context, data):\n order(symbol('AAPL'), 10)\n record(AAPL=data.current(symbol('AAPL'), 'price'))\n\n\n# Note: this function can be removed if running\n# this algorithm on quantopian.com\ndef analyze(context=None, results=None):\n import matplotlib.pyplot as plt\n # Plot the portfolio and asset data.\n ax1 = plt.subplot(211)\n results.portfolio_value.plot(ax=ax1)\n ax1.set_ylabel('Portfolio value (USD)')\n ax2 = plt.subplot(212, sharex=ax1)\n results.AAPL.plot(ax=ax2)\n ax2.set_ylabel('AAPL price (USD)')\n\n # Show the plot.\n plt.gcf().set_size_inches(18, 8)\n plt.show()\n\n\ndef _test_args():\n \"\"\"Extra arguments to use when zipline's automated tests run this example.\n \"\"\"\n import pandas as pd\n\n return {\n 'start': pd.Timestamp('2014-01-01', tz='utc'),\n 'end': pd.Timestamp('2014-11-01', tz='utc'),\n }\n", "path": "zipline/examples/buyapple.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2014 
Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom zipline.api import order, record, symbol\n\n\ndef initialize(context):\n context.asset = symbol('AAPL')\n\n\ndef handle_data(context, data):\n order(context.asset, 10)\n record(AAPL=data.current(context.asset, 'price'))\n\n\n# Note: this function can be removed if running\n# this algorithm on quantopian.com\ndef analyze(context=None, results=None):\n import matplotlib.pyplot as plt\n # Plot the portfolio and asset data.\n ax1 = plt.subplot(211)\n results.portfolio_value.plot(ax=ax1)\n ax1.set_ylabel('Portfolio value (USD)')\n ax2 = plt.subplot(212, sharex=ax1)\n results.AAPL.plot(ax=ax2)\n ax2.set_ylabel('AAPL price (USD)')\n\n # Show the plot.\n plt.gcf().set_size_inches(18, 8)\n plt.show()\n\n\ndef _test_args():\n \"\"\"Extra arguments to use when zipline's automated tests run this example.\n \"\"\"\n import pandas as pd\n\n return {\n 'start': pd.Timestamp('2014-01-01', tz='utc'),\n 'end': pd.Timestamp('2014-11-01', tz='utc'),\n }\n", "path": "zipline/examples/buyapple.py"}]} | 1,102 | 149 |
gh_patches_debug_1217 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2857 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'
https://sentry.liqd.net/sentry/meinberlin-dev/issues/1032/
```
ValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'
(35 additional frame(s) were not displayed)
...
File "django/templatetags/static.py", line 118, in handle_simple
return staticfiles_storage.url(path)
File "django_cloudflare_push/middleware.py", line 47, in url
return super(DebugStaticFilesStorage, self).url(path)
File "django/contrib/staticfiles/storage.py", line 153, in url
return self._url(self.stored_name, name, force)
File "django/contrib/staticfiles/storage.py", line 132, in _url
hashed_name = hashed_name_func(*args)
File "django/contrib/staticfiles/storage.py", line 420, in stored_name
raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
Internal Server Error: /kiezkasse/create/module/kiezkasse-2/
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/mapideas/forms.py`
Content:
```
1 from django import forms
2 from django.utils.translation import ugettext_lazy as _
3
4 from adhocracy4.categories.forms import CategorizableFieldMixin
5 from adhocracy4.labels.mixins import LabelsAddableFieldMixin
6 from adhocracy4.maps import widgets as maps_widgets
7 from meinberlin.apps.contrib.mixins import ImageRightOfUseMixin
8
9 from . import models
10
11
12 class MapIdeaForm(CategorizableFieldMixin,
13 LabelsAddableFieldMixin,
14 ImageRightOfUseMixin):
15
16 def __init__(self, *args, **kwargs):
17 self.settings = kwargs.pop('settings_instance')
18 super().__init__(*args, **kwargs)
19 self.fields['point'].widget = maps_widgets.MapChoosePointWidget(
20 polygon=self.settings.polygon)
21 self.fields['point'].error_messages['required'] = _(
22 'Please locate your proposal on the map.')
23
24 class Media:
25 js = ('js/select_dropdown_init.js',)
26
27 class Meta:
28 model = models.MapIdea
29 fields = ['name', 'description', 'image', 'category',
30 'labels', 'point', 'point_label']
31
32
33 class MapIdeaModerateForm(forms.ModelForm):
34 class Meta:
35 model = models.MapIdea
36 fields = ['moderator_feedback']
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/mapideas/forms.py b/meinberlin/apps/mapideas/forms.py
--- a/meinberlin/apps/mapideas/forms.py
+++ b/meinberlin/apps/mapideas/forms.py
@@ -22,7 +22,7 @@
'Please locate your proposal on the map.')
class Media:
- js = ('js/select_dropdown_init.js',)
+ js = ('select_dropdown_init.js',)
class Meta:
model = models.MapIdea
| {"golden_diff": "diff --git a/meinberlin/apps/mapideas/forms.py b/meinberlin/apps/mapideas/forms.py\n--- a/meinberlin/apps/mapideas/forms.py\n+++ b/meinberlin/apps/mapideas/forms.py\n@@ -22,7 +22,7 @@\n 'Please locate your proposal on the map.')\n \n class Media:\n- js = ('js/select_dropdown_init.js',)\n+ js = ('select_dropdown_init.js',)\n \n class Meta:\n model = models.MapIdea\n", "issue": "ValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'\nhttps://sentry.liqd.net/sentry/meinberlin-dev/issues/1032/\n\n```\nValueError: Missing staticfiles manifest entry for 'js/select_dropdown_init.js'\n(35 additional frame(s) were not displayed)\n...\n File \"django/templatetags/static.py\", line 118, in handle_simple\n return staticfiles_storage.url(path)\n File \"django_cloudflare_push/middleware.py\", line 47, in url\n return super(DebugStaticFilesStorage, self).url(path)\n File \"django/contrib/staticfiles/storage.py\", line 153, in url\n return self._url(self.stored_name, name, force)\n File \"django/contrib/staticfiles/storage.py\", line 132, in _url\n hashed_name = hashed_name_func(*args)\n File \"django/contrib/staticfiles/storage.py\", line 420, in stored_name\n raise ValueError(\"Missing staticfiles manifest entry for '%s'\" % clean_name)\n\nInternal Server Error: /kiezkasse/create/module/kiezkasse-2/\n```\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.categories.forms import CategorizableFieldMixin\nfrom adhocracy4.labels.mixins import LabelsAddableFieldMixin\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom meinberlin.apps.contrib.mixins import ImageRightOfUseMixin\n\nfrom . import models\n\n\nclass MapIdeaForm(CategorizableFieldMixin,\n LabelsAddableFieldMixin,\n ImageRightOfUseMixin):\n\n def __init__(self, *args, **kwargs):\n self.settings = kwargs.pop('settings_instance')\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = maps_widgets.MapChoosePointWidget(\n polygon=self.settings.polygon)\n self.fields['point'].error_messages['required'] = _(\n 'Please locate your proposal on the map.')\n\n class Media:\n js = ('js/select_dropdown_init.js',)\n\n class Meta:\n model = models.MapIdea\n fields = ['name', 'description', 'image', 'category',\n 'labels', 'point', 'point_label']\n\n\nclass MapIdeaModerateForm(forms.ModelForm):\n class Meta:\n model = models.MapIdea\n fields = ['moderator_feedback']\n", "path": "meinberlin/apps/mapideas/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.categories.forms import CategorizableFieldMixin\nfrom adhocracy4.labels.mixins import LabelsAddableFieldMixin\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom meinberlin.apps.contrib.mixins import ImageRightOfUseMixin\n\nfrom . 
import models\n\n\nclass MapIdeaForm(CategorizableFieldMixin,\n LabelsAddableFieldMixin,\n ImageRightOfUseMixin):\n\n def __init__(self, *args, **kwargs):\n self.settings = kwargs.pop('settings_instance')\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = maps_widgets.MapChoosePointWidget(\n polygon=self.settings.polygon)\n self.fields['point'].error_messages['required'] = _(\n 'Please locate your proposal on the map.')\n\n class Media:\n js = ('select_dropdown_init.js',)\n\n class Meta:\n model = models.MapIdea\n fields = ['name', 'description', 'image', 'category',\n 'labels', 'point', 'point_label']\n\n\nclass MapIdeaModerateForm(forms.ModelForm):\n class Meta:\n model = models.MapIdea\n fields = ['moderator_feedback']\n", "path": "meinberlin/apps/mapideas/forms.py"}]} | 850 | 109 |
gh_patches_debug_27981 | rasdani/github-patches | git_diff | ocadotechnology__codeforlife-portal-641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Postcode / Zipcode error message at School creation is misleading
Since most web browsers autocomplete the postcode in the class creation form as the email address, teachers receive a badly worded AND badly positioned error message (the message makes it sound like the error is in the name)
It should mention something like, please input a valid postcode / zipcode
and be below the postcode field.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `portal/forms/organisation.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2017, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 from django import forms
38
39 from portal.models import School
40
41 from django_countries.widgets import CountrySelectWidget
42 from django.core.exceptions import ObjectDoesNotExist
43
44
45 class OrganisationForm(forms.ModelForm):
46
47 current_password = forms.CharField(
48 label='Enter your password',
49 widget=forms.PasswordInput(attrs={'autocomplete': "off"}))
50
51 class Meta:
52 model = School
53 fields = ['name', 'postcode', 'country']
54 labels = {
55 'name' : "Name of your school or club",
56 'postcode' : 'Postcode',
57 'country' : 'Country',
58 }
59 widgets = {
60 'name' : forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Name of your school or club'}),
61 'postcode' : forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Postcode'}),
62 'country' : CountrySelectWidget(attrs={'class': 'wide'}),
63 }
64
65 def __init__(self, *args, **kwargs):
66 self.user = kwargs.pop('user', None)
67 self.current_school = kwargs.pop('current_school', None)
68 super(OrganisationForm, self).__init__(*args, **kwargs)
69 if self.current_school:
70 del self.fields['current_password']
71
72 def clean(self):
73 name = self.cleaned_data.get('name', None)
74 postcode = self.cleaned_data.get('postcode', None)
75
76 if name and postcode:
77 try:
78 school = School.objects.get(name=name, postcode=postcode)
79 except ObjectDoesNotExist:
80 return self.cleaned_data
81
82 if not self.current_school or self.current_school.id != school.id:
83 raise forms.ValidationError(
84 "There is already a school or club registered with that name and postcode")
85
86 return self.cleaned_data
87
88 def clean_postcode(self):
89 postcode = self.cleaned_data.get('postcode', None)
90
91 if postcode:
92 # Basic postcode check for now
93 if not len(postcode.replace(' ', '')) > 0:
94 raise forms.ValidationError("That postcode was not recognised")
95
96 return postcode
97
98 def clean_current_password(self):
99 current_password = self.cleaned_data.get('current_password', None)
100 if not self.user.check_password(current_password):
101 raise forms.ValidationError("Your password was incorrect")
102
103
104 class OrganisationJoinForm(forms.Form):
105 fuzzy_name = forms.CharField(
106 label="Search for school or club by name or postcode",
107 widget=forms.TextInput(
108 attrs={'placeholder': "Enrico Fermi High School"}))
109
110 # Note: the reason this is a CharField rather than a ChoiceField is to avoid having to
111 # provide choices which was problematic given that the options are dynamically generated.
112 chosen_org = forms.CharField(
113 label='Select school or club',
114 widget=forms.Select(attrs={'class': 'wide'}))
115
116 def clean_chosen_org(self):
117 chosen_org = self.cleaned_data.get('chosen_org', None)
118
119 if chosen_org and not School.objects.filter(id=int(chosen_org)).exists():
120 raise forms.ValidationError("That school or club was not recognised")
121
122 return chosen_org
123
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/portal/forms/organisation.py b/portal/forms/organisation.py
--- a/portal/forms/organisation.py
+++ b/portal/forms/organisation.py
@@ -52,14 +52,14 @@
model = School
fields = ['name', 'postcode', 'country']
labels = {
- 'name' : "Name of your school or club",
- 'postcode' : 'Postcode',
- 'country' : 'Country',
+ 'name': "Name of your school or club",
+ 'postcode': 'Postcode',
+ 'country': 'Country',
}
widgets = {
- 'name' : forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Name of your school or club'}),
- 'postcode' : forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Postcode'}),
- 'country' : CountrySelectWidget(attrs={'class': 'wide'}),
+ 'name': forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Name of your school or club'}),
+ 'postcode': forms.TextInput(attrs={'autocomplete': "off", 'placeholder': 'Postcode'}),
+ 'country': CountrySelectWidget(attrs={'class': 'wide'}),
}
def __init__(self, *args, **kwargs):
@@ -89,9 +89,8 @@
postcode = self.cleaned_data.get('postcode', None)
if postcode:
- # Basic postcode check for now
- if not len(postcode.replace(' ', '')) > 0:
- raise forms.ValidationError("That postcode was not recognised")
+ if len(postcode.replace(' ', '')) > 10 or len(postcode.replace(' ', '')) == 0:
+ raise forms.ValidationError("Please enter a valid postcode or ZIP code")
return postcode
| {"golden_diff": "diff --git a/portal/forms/organisation.py b/portal/forms/organisation.py\n--- a/portal/forms/organisation.py\n+++ b/portal/forms/organisation.py\n@@ -52,14 +52,14 @@\n model = School\n fields = ['name', 'postcode', 'country']\n labels = {\n- 'name' : \"Name of your school or club\",\n- 'postcode' : 'Postcode',\n- 'country' : 'Country',\n+ 'name': \"Name of your school or club\",\n+ 'postcode': 'Postcode',\n+ 'country': 'Country',\n }\n widgets = {\n- 'name' : forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Name of your school or club'}),\n- 'postcode' : forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Postcode'}),\n- 'country' : CountrySelectWidget(attrs={'class': 'wide'}),\n+ 'name': forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Name of your school or club'}),\n+ 'postcode': forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Postcode'}),\n+ 'country': CountrySelectWidget(attrs={'class': 'wide'}),\n }\n \n def __init__(self, *args, **kwargs):\n@@ -89,9 +89,8 @@\n postcode = self.cleaned_data.get('postcode', None)\n \n if postcode:\n- # Basic postcode check for now\n- if not len(postcode.replace(' ', '')) > 0:\n- raise forms.ValidationError(\"That postcode was not recognised\")\n+ if len(postcode.replace(' ', '')) > 10 or len(postcode.replace(' ', '')) == 0:\n+ raise forms.ValidationError(\"Please enter a valid postcode or ZIP code\")\n \n return postcode\n", "issue": "Postcode / Zipcode error message at School creation is misleading\nSince most web browsers autocomplete the postcode in the class creation form as the email address, teachers receive a badly worded AND badly positioned error message (the message makes it sound like the error is in the name)\r\nIt should mention something like, please input a valid postcode / zipcode \r\nand be below the postcode field.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2017, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. 
You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django import forms\n\nfrom portal.models import School\n\nfrom django_countries.widgets import CountrySelectWidget\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass OrganisationForm(forms.ModelForm):\n\n current_password = forms.CharField(\n label='Enter your password',\n widget=forms.PasswordInput(attrs={'autocomplete': \"off\"}))\n\n class Meta:\n model = School\n fields = ['name', 'postcode', 'country']\n labels = {\n 'name' : \"Name of your school or club\",\n 'postcode' : 'Postcode',\n 'country' : 'Country',\n }\n widgets = {\n 'name' : forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Name of your school or club'}),\n 'postcode' : forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Postcode'}),\n 'country' : CountrySelectWidget(attrs={'class': 'wide'}),\n }\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n self.current_school = kwargs.pop('current_school', None)\n super(OrganisationForm, self).__init__(*args, **kwargs)\n if self.current_school:\n del self.fields['current_password']\n\n def clean(self):\n name = self.cleaned_data.get('name', None)\n postcode = self.cleaned_data.get('postcode', None)\n\n if name and postcode:\n try:\n school = School.objects.get(name=name, postcode=postcode)\n except ObjectDoesNotExist:\n return self.cleaned_data\n\n if not self.current_school or self.current_school.id != school.id:\n raise forms.ValidationError(\n \"There is already a school or club registered with that name and postcode\")\n\n return self.cleaned_data\n\n def clean_postcode(self):\n postcode = self.cleaned_data.get('postcode', None)\n\n if postcode:\n # Basic postcode check for now\n if not len(postcode.replace(' ', '')) > 0:\n raise forms.ValidationError(\"That postcode was not recognised\")\n\n return postcode\n\n def clean_current_password(self):\n current_password = self.cleaned_data.get('current_password', None)\n if not self.user.check_password(current_password):\n raise forms.ValidationError(\"Your password was incorrect\")\n\n\nclass OrganisationJoinForm(forms.Form):\n fuzzy_name = forms.CharField(\n label=\"Search for school or club by name or postcode\",\n widget=forms.TextInput(\n attrs={'placeholder': \"Enrico Fermi High School\"}))\n\n # Note: the reason this is a CharField rather than a ChoiceField is to avoid having to\n # provide choices which was problematic given that the options are dynamically generated.\n chosen_org = forms.CharField(\n label='Select school or club',\n widget=forms.Select(attrs={'class': 'wide'}))\n\n def clean_chosen_org(self):\n chosen_org = self.cleaned_data.get('chosen_org', None)\n\n if chosen_org and not School.objects.filter(id=int(chosen_org)).exists():\n raise forms.ValidationError(\"That school or club was not recognised\")\n\n return 
chosen_org\n\n", "path": "portal/forms/organisation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2017, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django import forms\n\nfrom portal.models import School\n\nfrom django_countries.widgets import CountrySelectWidget\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass OrganisationForm(forms.ModelForm):\n\n current_password = forms.CharField(\n label='Enter your password',\n widget=forms.PasswordInput(attrs={'autocomplete': \"off\"}))\n\n class Meta:\n model = School\n fields = ['name', 'postcode', 'country']\n labels = {\n 'name': \"Name of your school or club\",\n 'postcode': 'Postcode',\n 'country': 'Country',\n }\n widgets = {\n 'name': forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Name of your school or club'}),\n 'postcode': forms.TextInput(attrs={'autocomplete': \"off\", 'placeholder': 'Postcode'}),\n 'country': CountrySelectWidget(attrs={'class': 'wide'}),\n }\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n self.current_school = kwargs.pop('current_school', None)\n super(OrganisationForm, self).__init__(*args, **kwargs)\n if self.current_school:\n del self.fields['current_password']\n\n def clean(self):\n name = self.cleaned_data.get('name', None)\n postcode = self.cleaned_data.get('postcode', None)\n\n if name and postcode:\n try:\n school = School.objects.get(name=name, postcode=postcode)\n except ObjectDoesNotExist:\n return self.cleaned_data\n\n if not self.current_school or self.current_school.id != school.id:\n raise forms.ValidationError(\n \"There is already a school or club registered with that name and postcode\")\n\n return self.cleaned_data\n\n def clean_postcode(self):\n postcode = self.cleaned_data.get('postcode', None)\n\n if postcode:\n if len(postcode.replace(' ', '')) > 10 or len(postcode.replace(' ', '')) == 0:\n raise forms.ValidationError(\"Please enter a valid postcode or ZIP code\")\n\n return postcode\n\n def clean_current_password(self):\n current_password = self.cleaned_data.get('current_password', None)\n if not self.user.check_password(current_password):\n raise forms.ValidationError(\"Your password was incorrect\")\n\n\nclass OrganisationJoinForm(forms.Form):\n fuzzy_name = forms.CharField(\n label=\"Search for school or club by name or postcode\",\n widget=forms.TextInput(\n attrs={'placeholder': \"Enrico Fermi High School\"}))\n\n # Note: the reason this is a CharField rather than a ChoiceField is to avoid having to\n # provide choices which was problematic given that the options are dynamically generated.\n chosen_org = forms.CharField(\n label='Select school or club',\n widget=forms.Select(attrs={'class': 'wide'}))\n\n def clean_chosen_org(self):\n chosen_org = self.cleaned_data.get('chosen_org', None)\n\n if chosen_org and not School.objects.filter(id=int(chosen_org)).exists():\n raise forms.ValidationError(\"That school or club was not recognised\")\n\n return chosen_org\n\n", "path": "portal/forms/organisation.py"}]} | 1,663 | 395 |
gh_patches_debug_1263 | rasdani/github-patches | git_diff | aws__aws-cli-2892 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support use of colorama up to 0.3.8
The colorama bugfix release 0.3.8 is available and contains no incompatible
changes. There is no need to restrict use to 0.3.7 or lower.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import codecs
3 import os.path
4 import re
5 import sys
6
7 from setuptools import setup, find_packages
8
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12
13 def read(*parts):
14 return codecs.open(os.path.join(here, *parts), 'r').read()
15
16
17 def find_version(*file_paths):
18 version_file = read(*file_paths)
19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
20 version_file, re.M)
21 if version_match:
22 return version_match.group(1)
23 raise RuntimeError("Unable to find version string.")
24
25
26 requires = ['botocore==1.10.19',
27 'colorama>=0.2.5,<=0.3.7',
28 'docutils>=0.10',
29 'rsa>=3.1.2,<=3.5.0',
30 's3transfer>=0.1.12,<0.2.0',
31 'PyYAML>=3.10,<=3.12']
32
33
34 if sys.version_info[:2] == (2, 6):
35 # For python2.6 we have to require argparse since it
36 # was not in stdlib until 2.7.
37 requires.append('argparse>=1.1')
38
39
40 setup_options = dict(
41 name='awscli',
42 version=find_version("awscli", "__init__.py"),
43 description='Universal Command Line Environment for AWS.',
44 long_description=open('README.rst').read(),
45 author='Amazon Web Services',
46 url='http://aws.amazon.com/cli/',
47 scripts=['bin/aws', 'bin/aws.cmd',
48 'bin/aws_completer', 'bin/aws_zsh_completer.sh',
49 'bin/aws_bash_completer'],
50 packages=find_packages(exclude=['tests*']),
51 package_data={'awscli': ['data/*.json', 'examples/*/*.rst',
52 'examples/*/*/*.rst', 'topics/*.rst',
53 'topics/*.json']},
54 install_requires=requires,
55 extras_require={
56 ':python_version=="2.6"': [
57 'argparse>=1.1',
58 ]
59 },
60 license="Apache License 2.0",
61 classifiers=(
62 'Development Status :: 5 - Production/Stable',
63 'Intended Audience :: Developers',
64 'Intended Audience :: System Administrators',
65 'Natural Language :: English',
66 'License :: OSI Approved :: Apache Software License',
67 'Programming Language :: Python',
68 'Programming Language :: Python :: 2.6',
69 'Programming Language :: Python :: 2.7',
70 'Programming Language :: Python :: 3',
71 'Programming Language :: Python :: 3.3',
72 'Programming Language :: Python :: 3.4',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 ),
76 )
77
78 if 'py2exe' in sys.argv:
79 # This will actually give us a py2exe command.
80 import py2exe
81 # And we have some py2exe specific options.
82 setup_options['options'] = {
83 'py2exe': {
84 'optimize': 0,
85 'skip_archive': True,
86 'dll_excludes': ['crypt32.dll'],
87 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',
88 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],
89 }
90 }
91 setup_options['console'] = ['bin/aws']
92
93
94 setup(**setup_options)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
requires = ['botocore==1.10.19',
- 'colorama>=0.2.5,<=0.3.7',
+ 'colorama>=0.2.5,<=0.3.9',
'docutils>=0.10',
'rsa>=3.1.2,<=3.5.0',
's3transfer>=0.1.12,<0.2.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,7 @@\n \n \n requires = ['botocore==1.10.19',\n- 'colorama>=0.2.5,<=0.3.7',\n+ 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.12,<0.2.0',\n", "issue": "- Support use of colorama up to 0.3.8\n + colorama bugfix release 0.3.8 is available and contains no incompatible\r\n changes. There is no need to restrict use to less or equal 0.3.7\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.10.19',\n 'colorama>=0.2.5,<=0.3.7',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.12,<0.2.0',\n 'PyYAML>=3.10,<=3.12']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = 
['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.10.19',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.12,<0.2.0',\n 'PyYAML>=3.10,<=3.12']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]} | 1,261 | 132 |
gh_patches_debug_21124 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-3464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
URL with Chinese query string returns 400
Using IE11, a URL with a Chinese query string returns 400.
1. Mitmproxy version: 3.0.0 (release version)
Python version: 3.5.3
Platform: Windows-10-10.0.14393-SP0
SSL version: OpenSSL 1.1.0e 16 Feb 2017
Windows version: 10 10.0.14393 SP0 Multiprocessor Free
2. Chrome + mitmdump is fine.
3. but using IE11 + mitmdump results in an error.
4. using IE11 + Burp Suite is fine.
5. mitmdump --listen-host 127.0.0.1 --listen-port 8080
Mitmproxy gave no hint of an error, but the query string **lc_name** was submitted with a different charset,
and the server returned HTTP 400.
The HTML charset is gb2312.
IE11 developer tools see url http://wlpx.tax-edu.net/jsp/portal/PortalControl?flag=selectCourse&lc_id=42825&lc_name=�������
chrome developer tools see url http://wlpx.tax-edu.net/jsp/portal/PortalControl?flag=selectCourse&lc_id=42825&lc_name=%CD%A8%D3%C3%D6%AA%CA%B6%BA%CD%C4%DC%C1%A6

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/net/http/url.py`
Content:
```
1 import urllib.parse
2 from typing import Sequence
3 from typing import Tuple
4
5 from mitmproxy.net import check
6
7
8 def parse(url):
9 """
10 URL-parsing function that checks that
11 - port is an integer 0-65535
12 - host is a valid IDNA-encoded hostname with no null-bytes
13 - path is valid ASCII
14
15 Args:
16 A URL (as bytes or as unicode)
17
18 Returns:
19 A (scheme, host, port, path) tuple
20
21 Raises:
22 ValueError, if the URL is not properly formatted.
23 """
24 parsed = urllib.parse.urlparse(url)
25
26 if not parsed.hostname:
27 raise ValueError("No hostname given")
28
29 if isinstance(url, bytes):
30 host = parsed.hostname
31
32 # this should not raise a ValueError,
33 # but we try to be very forgiving here and accept just everything.
34 else:
35 host = parsed.hostname.encode("idna")
36 if isinstance(parsed, urllib.parse.ParseResult):
37 parsed = parsed.encode("ascii")
38
39 port = parsed.port # Returns None if port number invalid in Py3.5. Will throw ValueError in Py3.6
40 if not port:
41 port = 443 if parsed.scheme == b"https" else 80
42
43 full_path = urllib.parse.urlunparse(
44 (b"", b"", parsed.path, parsed.params, parsed.query, parsed.fragment)
45 )
46 if not full_path.startswith(b"/"):
47 full_path = b"/" + full_path
48
49 if not check.is_valid_host(host):
50 raise ValueError("Invalid Host")
51
52 return parsed.scheme, host, port, full_path
53
54
55 def unparse(scheme, host, port, path=""):
56 """
57 Returns a URL string, constructed from the specified components.
58
59 Args:
60 All args must be str.
61 """
62 if path == "*":
63 path = ""
64 return "%s://%s%s" % (scheme, hostport(scheme, host, port), path)
65
66
67 def encode(s: Sequence[Tuple[str, str]], similar_to: str=None) -> str:
68 """
69 Takes a list of (key, value) tuples and returns a urlencoded string.
70 If similar_to is passed, the output is formatted similar to the provided urlencoded string.
71 """
72
73 remove_trailing_equal = False
74 if similar_to:
75 remove_trailing_equal = any("=" not in param for param in similar_to.split("&"))
76
77 encoded = urllib.parse.urlencode(s, False, errors="surrogateescape")
78
79 if encoded and remove_trailing_equal:
80 encoded = encoded.replace("=&", "&")
81 if encoded[-1] == '=':
82 encoded = encoded[:-1]
83
84 return encoded
85
86
87 def decode(s):
88 """
89 Takes a urlencoded string and returns a list of surrogate-escaped (key, value) tuples.
90 """
91 return urllib.parse.parse_qsl(s, keep_blank_values=True, errors='surrogateescape')
92
93
94 def quote(b: str, safe: str="/") -> str:
95 """
96 Returns:
97 An ascii-encodable str.
98 """
99 return urllib.parse.quote(b, safe=safe, errors="surrogateescape")
100
101
102 def unquote(s: str) -> str:
103 """
104 Args:
105 s: A surrogate-escaped str
106 Returns:
107 A surrogate-escaped str
108 """
109 return urllib.parse.unquote(s, errors="surrogateescape")
110
111
112 def hostport(scheme, host, port):
113 """
114 Returns the host component, with a port specifcation if needed.
115 """
116 if (port, scheme) in [(80, "http"), (443, "https"), (80, b"http"), (443, b"https")]:
117 return host
118 else:
119 if isinstance(host, bytes):
120 return b"%s:%d" % (host, port)
121 else:
122 return "%s:%d" % (host, port)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/net/http/url.py b/mitmproxy/net/http/url.py
--- a/mitmproxy/net/http/url.py
+++ b/mitmproxy/net/http/url.py
@@ -21,16 +21,25 @@
Raises:
ValueError, if the URL is not properly formatted.
"""
- parsed = urllib.parse.urlparse(url)
+ # Size of Ascii character after encoding is 1 byte which is same as its size
+ # But non-Ascii character's size after encoding will be more than its size
+ def ascii_check(l):
+ if len(l) == len(str(l).encode()):
+ return True
+ return False
+
+ if isinstance(url, bytes):
+ url = url.decode()
+ if not ascii_check(url):
+ url = urllib.parse.urlsplit(url)
+ url = list(url)
+ url[3] = urllib.parse.quote(url[3])
+ url = urllib.parse.urlunsplit(url)
+ parsed = urllib.parse.urlparse(url)
if not parsed.hostname:
raise ValueError("No hostname given")
- if isinstance(url, bytes):
- host = parsed.hostname
-
- # this should not raise a ValueError,
- # but we try to be very forgiving here and accept just everything.
else:
host = parsed.hostname.encode("idna")
if isinstance(parsed, urllib.parse.ParseResult):
| {"golden_diff": "diff --git a/mitmproxy/net/http/url.py b/mitmproxy/net/http/url.py\n--- a/mitmproxy/net/http/url.py\n+++ b/mitmproxy/net/http/url.py\n@@ -21,16 +21,25 @@\n Raises:\n ValueError, if the URL is not properly formatted.\n \"\"\"\n- parsed = urllib.parse.urlparse(url)\n+ # Size of Ascii character after encoding is 1 byte which is same as its size\n+ # But non-Ascii character's size after encoding will be more than its size\n+ def ascii_check(l):\n+ if len(l) == len(str(l).encode()):\n+ return True\n+ return False\n+\n+ if isinstance(url, bytes):\n+ url = url.decode()\n+ if not ascii_check(url):\n+ url = urllib.parse.urlsplit(url)\n+ url = list(url)\n+ url[3] = urllib.parse.quote(url[3])\n+ url = urllib.parse.urlunsplit(url)\n \n+ parsed = urllib.parse.urlparse(url)\n if not parsed.hostname:\n raise ValueError(\"No hostname given\")\n \n- if isinstance(url, bytes):\n- host = parsed.hostname\n-\n- # this should not raise a ValueError,\n- # but we try to be very forgiving here and accept just everything.\n else:\n host = parsed.hostname.encode(\"idna\")\n if isinstance(parsed, urllib.parse.ParseResult):\n", "issue": "url with Chinese query string return 400\nuse IE11,url with Chinese query string,return 400.\r\n\r\n1. Mitmproxy version: 3.0.0 (release version)\r\n Python version: 3.5.3\r\n Platform: Windows-10-10.0.14393-SP0\r\n SSL version: OpenSSL 1.1.0e 16 Feb 2017 \r\n Windows version: 10 10.0.14393 SP0 Multiprocessor Free\r\n2. chrome+mitmdump is fine.\r\n3. but use IE11+mitmdump is error. \r\n4. use IE11 + burpsuite is fine.\r\n5. mitmdump --listen-host 127.0.0.1 --listen-port 8080\r\n\r\nMitmproxy was no hint error, but query string **lc_name** was submitted to the charset difference.\r\nreturn HTTP 400.\r\nhtml charset is gb2312.\r\n\r\nIE11 developer tools see url http://wlpx.tax-edu.net/jsp/portal/PortalControl?flag=selectCourse&lc_id=42825&lc_name=\u00e9\ufffd\ufffd\u00e7\ufffd\u00a8\u00e7\ufffd\u00a5\u00e8\u00af\ufffd\u00e5\ufffd\ufffd\u00e8\ufffd\u00bd\u00e5\ufffd\ufffd\r\n\r\nchrome developer tools see url http://wlpx.tax-edu.net/jsp/portal/PortalControl?flag=selectCourse&lc_id=42825&lc_name=%CD%A8%D3%C3%D6%AA%CA%B6%BA%CD%C4%DC%C1%A6\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import urllib.parse\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom mitmproxy.net import check\n\n\ndef parse(url):\n \"\"\"\n URL-parsing function that checks that\n - port is an integer 0-65535\n - host is a valid IDNA-encoded hostname with no null-bytes\n - path is valid ASCII\n\n Args:\n A URL (as bytes or as unicode)\n\n Returns:\n A (scheme, host, port, path) tuple\n\n Raises:\n ValueError, if the URL is not properly formatted.\n \"\"\"\n parsed = urllib.parse.urlparse(url)\n\n if not parsed.hostname:\n raise ValueError(\"No hostname given\")\n\n if isinstance(url, bytes):\n host = parsed.hostname\n\n # this should not raise a ValueError,\n # but we try to be very forgiving here and accept just everything.\n else:\n host = parsed.hostname.encode(\"idna\")\n if isinstance(parsed, urllib.parse.ParseResult):\n parsed = parsed.encode(\"ascii\")\n\n port = parsed.port # Returns None if port number invalid in Py3.5. 
Will throw ValueError in Py3.6\n if not port:\n port = 443 if parsed.scheme == b\"https\" else 80\n\n full_path = urllib.parse.urlunparse(\n (b\"\", b\"\", parsed.path, parsed.params, parsed.query, parsed.fragment)\n )\n if not full_path.startswith(b\"/\"):\n full_path = b\"/\" + full_path\n\n if not check.is_valid_host(host):\n raise ValueError(\"Invalid Host\")\n\n return parsed.scheme, host, port, full_path\n\n\ndef unparse(scheme, host, port, path=\"\"):\n \"\"\"\n Returns a URL string, constructed from the specified components.\n\n Args:\n All args must be str.\n \"\"\"\n if path == \"*\":\n path = \"\"\n return \"%s://%s%s\" % (scheme, hostport(scheme, host, port), path)\n\n\ndef encode(s: Sequence[Tuple[str, str]], similar_to: str=None) -> str:\n \"\"\"\n Takes a list of (key, value) tuples and returns a urlencoded string.\n If similar_to is passed, the output is formatted similar to the provided urlencoded string.\n \"\"\"\n\n remove_trailing_equal = False\n if similar_to:\n remove_trailing_equal = any(\"=\" not in param for param in similar_to.split(\"&\"))\n\n encoded = urllib.parse.urlencode(s, False, errors=\"surrogateescape\")\n\n if encoded and remove_trailing_equal:\n encoded = encoded.replace(\"=&\", \"&\")\n if encoded[-1] == '=':\n encoded = encoded[:-1]\n\n return encoded\n\n\ndef decode(s):\n \"\"\"\n Takes a urlencoded string and returns a list of surrogate-escaped (key, value) tuples.\n \"\"\"\n return urllib.parse.parse_qsl(s, keep_blank_values=True, errors='surrogateescape')\n\n\ndef quote(b: str, safe: str=\"/\") -> str:\n \"\"\"\n Returns:\n An ascii-encodable str.\n \"\"\"\n return urllib.parse.quote(b, safe=safe, errors=\"surrogateescape\")\n\n\ndef unquote(s: str) -> str:\n \"\"\"\n Args:\n s: A surrogate-escaped str\n Returns:\n A surrogate-escaped str\n \"\"\"\n return urllib.parse.unquote(s, errors=\"surrogateescape\")\n\n\ndef hostport(scheme, host, port):\n \"\"\"\n Returns the host component, with a port specifcation if needed.\n \"\"\"\n if (port, scheme) in [(80, \"http\"), (443, \"https\"), (80, b\"http\"), (443, b\"https\")]:\n return host\n else:\n if isinstance(host, bytes):\n return b\"%s:%d\" % (host, port)\n else:\n return \"%s:%d\" % (host, port)\n", "path": "mitmproxy/net/http/url.py"}], "after_files": [{"content": "import urllib.parse\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom mitmproxy.net import check\n\n\ndef parse(url):\n \"\"\"\n URL-parsing function that checks that\n - port is an integer 0-65535\n - host is a valid IDNA-encoded hostname with no null-bytes\n - path is valid ASCII\n\n Args:\n A URL (as bytes or as unicode)\n\n Returns:\n A (scheme, host, port, path) tuple\n\n Raises:\n ValueError, if the URL is not properly formatted.\n \"\"\"\n # Size of Ascii character after encoding is 1 byte which is same as its size\n # But non-Ascii character's size after encoding will be more than its size\n def ascii_check(l):\n if len(l) == len(str(l).encode()):\n return True\n return False\n\n if isinstance(url, bytes):\n url = url.decode()\n if not ascii_check(url):\n url = urllib.parse.urlsplit(url)\n url = list(url)\n url[3] = urllib.parse.quote(url[3])\n url = urllib.parse.urlunsplit(url)\n\n parsed = urllib.parse.urlparse(url)\n if not parsed.hostname:\n raise ValueError(\"No hostname given\")\n\n else:\n host = parsed.hostname.encode(\"idna\")\n if isinstance(parsed, urllib.parse.ParseResult):\n parsed = parsed.encode(\"ascii\")\n\n port = parsed.port # Returns None if port number invalid in Py3.5. 
Will throw ValueError in Py3.6\n if not port:\n port = 443 if parsed.scheme == b\"https\" else 80\n\n full_path = urllib.parse.urlunparse(\n (b\"\", b\"\", parsed.path, parsed.params, parsed.query, parsed.fragment)\n )\n if not full_path.startswith(b\"/\"):\n full_path = b\"/\" + full_path\n\n if not check.is_valid_host(host):\n raise ValueError(\"Invalid Host\")\n\n return parsed.scheme, host, port, full_path\n\n\ndef unparse(scheme, host, port, path=\"\"):\n \"\"\"\n Returns a URL string, constructed from the specified components.\n\n Args:\n All args must be str.\n \"\"\"\n if path == \"*\":\n path = \"\"\n return \"%s://%s%s\" % (scheme, hostport(scheme, host, port), path)\n\n\ndef encode(s: Sequence[Tuple[str, str]], similar_to: str=None) -> str:\n \"\"\"\n Takes a list of (key, value) tuples and returns a urlencoded string.\n If similar_to is passed, the output is formatted similar to the provided urlencoded string.\n \"\"\"\n\n remove_trailing_equal = False\n if similar_to:\n remove_trailing_equal = any(\"=\" not in param for param in similar_to.split(\"&\"))\n\n encoded = urllib.parse.urlencode(s, False, errors=\"surrogateescape\")\n\n if encoded and remove_trailing_equal:\n encoded = encoded.replace(\"=&\", \"&\")\n if encoded[-1] == '=':\n encoded = encoded[:-1]\n\n return encoded\n\n\ndef decode(s):\n \"\"\"\n Takes a urlencoded string and returns a list of surrogate-escaped (key, value) tuples.\n \"\"\"\n return urllib.parse.parse_qsl(s, keep_blank_values=True, errors='surrogateescape')\n\n\ndef quote(b: str, safe: str=\"/\") -> str:\n \"\"\"\n Returns:\n An ascii-encodable str.\n \"\"\"\n return urllib.parse.quote(b, safe=safe, errors=\"surrogateescape\")\n\n\ndef unquote(s: str) -> str:\n \"\"\"\n Args:\n s: A surrogate-escaped str\n Returns:\n A surrogate-escaped str\n \"\"\"\n return urllib.parse.unquote(s, errors=\"surrogateescape\")\n\n\ndef hostport(scheme, host, port):\n \"\"\"\n Returns the host component, with a port specifcation if needed.\n \"\"\"\n if (port, scheme) in [(80, \"http\"), (443, \"https\"), (80, b\"http\"), (443, b\"https\")]:\n return host\n else:\n if isinstance(host, bytes):\n return b\"%s:%d\" % (host, port)\n else:\n return \"%s:%d\" % (host, port)\n", "path": "mitmproxy/net/http/url.py"}]} | 1,788 | 307 |
gh_patches_debug_17447 | rasdani/github-patches | git_diff | wagtail__wagtail-10039 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🎛️ Migrate site switcher to use Stimulus approach `ActionController`
> ℹ️ **Part of the [Stimulus 🎛️ RFC 78](https://github.com/wagtail/rfcs/pull/78)**
### Is your proposal related to a problem?
There is a custom JavaScript implementation to add behaviour to select drop-down that will update the location (URL) when changed.
This approach should be very close to what we are already doing with the `SubmitController`, so let's do a bit of clean-up to avoid too much ad-hoc JS.
### Describe the solution you'd like
* Update the implementation of `client/src/controllers/SubmitController.ts` to allow for a new [Stimulus Value](https://stimulus.hotwired.dev/reference/values) called `updateAction`.
* When in use, the existing method `submit` will update the form's action value before submitting from the source element's value. `form.setAttribute('action', this.element.value); // example`
* Essentially we want to use the form `get` submit to do the location change, instead of updating the `window.location.url`.
* However, we need to ensure the right page is loaded, hence we need to revise `action` dynamically when the user selects the option.
* Remove the jQuery implementation completely [`wagtail/contrib/settings/static_src/wagtailsettings/js/site-switcher.js`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/static_src/wagtailsettings/js/site-switcher.js)
* Update the select field to have the suitable data attributes [`wagtail/contrib/settings/forms.py`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/forms.py#L23).
* Unit tests in JavaScript **must** be included with a PR.
* Validate that the 'current' option in the select drop-down for the site switcher still functions, so that selecting it will not do anything. See wagtail/contrib/settings/forms.py (Update: this is not a huge problem; the browser will not trigger a `change` event if the value has not changed).
#### Example HTML
```html
<form method="get" id="settings-site-switch" novalidate>
<select
name="site-switcher"
data-controller="w-submit"
data-action="change->w-submit#submit"
data-w-submit-update-action-value="true"
>
<option value="/path/to/current-site" selected>current.com</option>
<option value="/path/to/other-site">other.com</option>
</select>
</form>
```
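
For illustration, a minimal sketch of how the select field in `wagtail/contrib/settings/forms.py` could emit those data attributes; the controller identifier and value name simply mirror the example HTML above and are assumptions, not the final implementation:

```python
# Sketch only: widget attributes mirroring the example HTML (names assumed).
from django import forms


class SiteSwitchForm(forms.Form):
    site = forms.ChoiceField(
        choices=[],
        widget=forms.Select(
            attrs={
                "data-controller": "w-submit",
                "data-action": "change->w-submit#submit",
                "data-w-submit-update-action-value": "true",
            }
        ),
    )
```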
### Additional notes
* Remember that Site Settings is not available in the bakery demo by default, you will need to add this locally to validate the behaviour https://docs.wagtail.org/en/stable/reference/contrib/settings.html
* `AutoFieldController` was added in this PR https://github.com/wagtail/wagtail/pull/9337 and then renamed to `SubmitController` in https://github.com/wagtail/wagtail/pull/10098
* The actual `form` HTML is located in [`wagtail/contrib/settings/templates/wagtailsettings/edit.html`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/templates/wagtailsettings/edit.html) - this HTML should not need changes but good to note
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/contrib/settings/forms.py`
Content:
```
1 from django import forms
2 from django.urls import reverse
3 from django.utils.translation import gettext_lazy as _
4
5 from wagtail.admin.staticfiles import versioned_static
6 from wagtail.models import Site
7
8
9 class SiteSwitchForm(forms.Form):
10 site = forms.ChoiceField(choices=[])
11
12 @property
13 def media(self):
14 return forms.Media(
15 js=[
16 versioned_static("wagtailsettings/js/site-switcher.js"),
17 ]
18 )
19
20 def __init__(self, current_site, model, **kwargs):
21 initial_data = {"site": self.get_change_url(current_site, model)}
22 super().__init__(initial=initial_data, **kwargs)
23 self.fields["site"].choices = [
24 (
25 self.get_change_url(site, model),
26 (
27 site.hostname + " [{}]".format(_("default"))
28 if site.is_default_site
29 else site.hostname
30 ),
31 )
32 for site in Site.objects.all()
33 ]
34
35 @classmethod
36 def get_change_url(cls, site, model):
37 return reverse(
38 "wagtailsettings:edit",
39 args=[model._meta.app_label, model._meta.model_name, site.pk],
40 )
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/contrib/settings/forms.py b/wagtail/contrib/settings/forms.py
--- a/wagtail/contrib/settings/forms.py
+++ b/wagtail/contrib/settings/forms.py
@@ -2,20 +2,19 @@
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
-from wagtail.admin.staticfiles import versioned_static
from wagtail.models import Site
class SiteSwitchForm(forms.Form):
- site = forms.ChoiceField(choices=[])
-
- @property
- def media(self):
- return forms.Media(
- js=[
- versioned_static("wagtailsettings/js/site-switcher.js"),
- ]
- )
+ site = forms.ChoiceField(
+ choices=[],
+ widget=forms.Select(
+ attrs={
+ "data-controller": "w-action",
+ "data-action": "change->w-action#redirect",
+ }
+ ),
+ )
def __init__(self, current_site, model, **kwargs):
initial_data = {"site": self.get_change_url(current_site, model)}
| {"golden_diff": "diff --git a/wagtail/contrib/settings/forms.py b/wagtail/contrib/settings/forms.py\n--- a/wagtail/contrib/settings/forms.py\n+++ b/wagtail/contrib/settings/forms.py\n@@ -2,20 +2,19 @@\n from django.urls import reverse\n from django.utils.translation import gettext_lazy as _\n \n-from wagtail.admin.staticfiles import versioned_static\n from wagtail.models import Site\n \n \n class SiteSwitchForm(forms.Form):\n- site = forms.ChoiceField(choices=[])\n-\n- @property\n- def media(self):\n- return forms.Media(\n- js=[\n- versioned_static(\"wagtailsettings/js/site-switcher.js\"),\n- ]\n- )\n+ site = forms.ChoiceField(\n+ choices=[],\n+ widget=forms.Select(\n+ attrs={\n+ \"data-controller\": \"w-action\",\n+ \"data-action\": \"change->w-action#redirect\",\n+ }\n+ ),\n+ )\n \n def __init__(self, current_site, model, **kwargs):\n initial_data = {\"site\": self.get_change_url(current_site, model)}\n", "issue": "\ud83c\udf9b\ufe0f Migrate site switcher to use Stimulus approach `ActionController`\n> \u2139\ufe0f **Part of the [Stimulus \ud83c\udf9b\ufe0f RFC 78](https://github.com/wagtail/rfcs/pull/78)**\r\n\r\n### Is your proposal related to a problem?\r\n\r\nThere is a custom JavaScript implementation to add behaviour to select drop-down that will update the location (URL) when changed.\r\n\r\nThis approach should be very close to what we are already doing with the `SubmitController` so let's do a a bit of clean up to avoid too much ad-hoc JS.\r\n\r\n### Describe the solution you'd like\r\n\r\n* Update the implementation of `client/src/controllers/SubmitController.ts` to allow for a new [Stimulus Value](https://stimulus.hotwired.dev/reference/values) called `updateAction`.\r\n * When in use, the existing method `submit` will update the form's action value before submitting from the source element's value. `form.setAttribute('action', this.element.value); // example`\r\n * Essentially we want to use the form `get` submit to do the location change, instead of updating the `window.location.url`.\r\n * However, we need to ensure the right page is loaded, hence we need to revise `action` dynamically when the user selects the option.\r\n* Remove the jQuery implementation completely [`wagtail/contrib/settings/static_src/wagtailsettings/js/site-switcher.js`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/static_src/wagtailsettings/js/site-switcher.js)\r\n* Update the select field to have the suitable data attributes [`wagtail/contrib/settings/forms.py`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/forms.py#L23).\r\n* Unit tests in JavaScript **must** be included with a PR.\r\n* Validate that the 'current' option in the select drop-down for the site switcher is still function, so that selecting it will not do anything. 
See wagtail/contrib/settings/forms.py (Update: This is not a huge problem, the browser will not trigger a `change` event if the value has not changed).\r\n\r\n#### Example HTML\r\n\r\n```html\r\n<form method=\"get\" id=\"settings-site-switch\" novalidate>\r\n <select\r\n name=\"site-switcher\"\r\n data-controller=\"w-submit\"\r\n data-action=\"change->w-submit#submit\"\r\n data-w-submit-update-action-value=\"true\"\r\n >\r\n <option value=\"/path/to/current-site\" selected>current.com</option>\r\n <option value=\"/path/to/other-site\">other.com</option>\r\n </select>\r\n</form>\r\n```\r\n\r\n\r\n### Additional notes\r\n\r\n* Remember that Site Settings is not available in the bakery demo by default, you will need to add this locally to validate the behaviour https://docs.wagtail.org/en/stable/reference/contrib/settings.html\r\n* `AutoFieldController` was added in this PR https://github.com/wagtail/wagtail/pull/9337 and then renamed to `SubmitController` in https://github.com/wagtail/wagtail/pull/10098\r\n* The actual `form` HTML is located in [`wagtail/contrib/settings/templates/wagtailsettings/edit.html`](https://github.com/wagtail/wagtail/blob/main/wagtail/contrib/settings/templates/wagtailsettings/edit.html) - this HTML should not need changes but good to note\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.staticfiles import versioned_static\nfrom wagtail.models import Site\n\n\nclass SiteSwitchForm(forms.Form):\n site = forms.ChoiceField(choices=[])\n\n @property\n def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailsettings/js/site-switcher.js\"),\n ]\n )\n\n def __init__(self, current_site, model, **kwargs):\n initial_data = {\"site\": self.get_change_url(current_site, model)}\n super().__init__(initial=initial_data, **kwargs)\n self.fields[\"site\"].choices = [\n (\n self.get_change_url(site, model),\n (\n site.hostname + \" [{}]\".format(_(\"default\"))\n if site.is_default_site\n else site.hostname\n ),\n )\n for site in Site.objects.all()\n ]\n\n @classmethod\n def get_change_url(cls, site, model):\n return reverse(\n \"wagtailsettings:edit\",\n args=[model._meta.app_label, model._meta.model_name, site.pk],\n )\n", "path": "wagtail/contrib/settings/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.models import Site\n\n\nclass SiteSwitchForm(forms.Form):\n site = forms.ChoiceField(\n choices=[],\n widget=forms.Select(\n attrs={\n \"data-controller\": \"w-action\",\n \"data-action\": \"change->w-action#redirect\",\n }\n ),\n )\n\n def __init__(self, current_site, model, **kwargs):\n initial_data = {\"site\": self.get_change_url(current_site, model)}\n super().__init__(initial=initial_data, **kwargs)\n self.fields[\"site\"].choices = [\n (\n self.get_change_url(site, model),\n (\n site.hostname + \" [{}]\".format(_(\"default\"))\n if site.is_default_site\n else site.hostname\n ),\n )\n for site in Site.objects.all()\n ]\n\n @classmethod\n def get_change_url(cls, site, model):\n return reverse(\n \"wagtailsettings:edit\",\n args=[model._meta.app_label, model._meta.model_name, site.pk],\n )\n", "path": "wagtail/contrib/settings/forms.py"}]} | 1,312 | 242 |
gh_patches_debug_15647 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1812 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Discuss type for renewable category for Ukraine UA
Currently, there is no breakdown for the Ukrainian renewable category provided on https://ua.energy/diyalnist/dyspetcherska-informatsiya/dobovyj-grafik-vyrobnytstva-spozhyvannya-e-e/
The renewable category (ВДЕ) is mapped as "wind" in the parser because wind used to be the dominant source.
Going through the last few days on the website, you will notice a very clear solar peak at noon (~1.200 MW) each day. Wind at night reaches a maximum of ~400 MW; mostly it is around 200 MW.
Here is an example for yesterday:

The installed solar capacity grew very fast and will continue to grow, because it is cheap and the potential in UA is huge:

Some suggestions to deal with this situation:
1. Any artificial boundaries (depending on the x-axis time, the y-axis megawatts, or both) pushing production to wind or solar?
Like "from 06:00 to 18:00 -> solar if P > 200 MW, else wind" (a rough sketch follows this list).
2. Put renewables into the unknown category with a mixed carbon intensity (looking at the installed capacity, a 50% wind : 50% solar split seems reasonable).
3. Actively search for a breakdown of wind and solar.
4. Ask the data provider for a breakdown.
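
A rough sketch of suggestion 1, purely for illustration: the 06:00 to 18:00 window, the 200 MW threshold, and the helper name are all assumptions, not parser code (suggestion 2 would avoid this guesswork entirely):

```python
# Hypothetical heuristic for splitting the aggregated 'vde' value:
# daytime plus high output is treated as solar, everything else as wind.
def split_vde(vde_mw, local_hour):
    if 6 <= local_hour < 18 and vde_mw > 200:
        return {"solar": vde_mw, "wind": 0.0}
    return {"solar": 0.0, "wind": vde_mw}
```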
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/UA.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import arrow
4 import dateutil
5 import requests
6
7 """
8 tec - same as `tes` but also working as central heater,
9 main fuel is gas, in critical situations - black oil
10 gesgaes - hydro run of river and poundage
11 consumptiongaespump - hydro pumped storage
12 vde - wind + solar, mostly wind
13
14 no data for biomass, solar and oil
15 """
16 MAP_GENERATION = {
17 'aes': 'nuclear',
18 'tec': 'gas',
19 'tes': 'coal',
20 'vde': 'wind',
21 'biomass': 'biomass',
22 'gesgaes': 'hydro',
23 'solar': 'solar',
24 'oil': 'oil',
25 'geothermal': 'geothermal',
26 }
27
28 MAP_STORAGE = {
29 'consumptiongaespump': 'hydro',
30 }
31
32 tz = 'Europe/Kiev'
33
34
35 def fetch_production(zone_key='UA', session=None, target_datetime=None, logger=None):
36 if target_datetime:
37 raise NotImplementedError('This parser is not yet able to parse past dates')
38 r = session or requests.session()
39
40 data = []
41 today = arrow.now(tz=tz).format('DD.MM.YYYY')
42 url = 'https://ua.energy/wp-admin/admin-ajax.php'
43 postdata = {
44 'action': 'get_data_oes',
45 'report_date': today,
46 'type': 'day'
47 }
48
49 response = r.post(url, postdata)
50
51 for serie in response.json():
52 row = {
53 'zoneKey': zone_key,
54 'production': {},
55 'storage': {},
56 'source': 'ua.energy'
57 }
58
59 # Storage
60 if 'consumptiongaespump' in serie:
61 row['storage']['hydro'] = serie['consumptiongaespump'] * -1
62
63 # Production
64 for k, v in MAP_GENERATION.items():
65 if k in serie:
66 row['production'][v] = serie[k]
67 else:
68 row['production'][v] = 0.0
69
70 # Date
71 date = arrow.get('%s %s' % (today, serie['hour']), 'DD.MM.YYYY HH:mm')
72 row['datetime'] = date.replace(tzinfo=dateutil.tz.gettz(tz)).datetime
73
74 data.append(row)
75 return data
76
77
78 if __name__ == '__main__':
79 print(fetch_production())
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/UA.py b/parsers/UA.py
--- a/parsers/UA.py
+++ b/parsers/UA.py
@@ -9,18 +9,18 @@
main fuel is gas, in critical situations - black oil
gesgaes - hydro run of river and poundage
consumptiongaespump - hydro pumped storage
-vde - wind + solar, mostly wind
+vde - renewable sources - mostly wind at nighttimes and solar peaks during the day
-no data for biomass, solar and oil
"""
MAP_GENERATION = {
'aes': 'nuclear',
'tec': 'gas',
'tes': 'coal',
- 'vde': 'wind',
+ 'vde': 'unknown',
'biomass': 'biomass',
'gesgaes': 'hydro',
'solar': 'solar',
+ 'wind': 'wind',
'oil': 'oil',
'geothermal': 'geothermal',
}
| {"golden_diff": "diff --git a/parsers/UA.py b/parsers/UA.py\n--- a/parsers/UA.py\n+++ b/parsers/UA.py\n@@ -9,18 +9,18 @@\n main fuel is gas, in critical situations - black oil\n gesgaes - hydro run of river and poundage\n consumptiongaespump - hydro pumped storage\n-vde - wind + solar, mostly wind\n+vde - renewable sources - mostly wind at nighttimes and solar peaks during the day\n \n-no data for biomass, solar and oil\n \"\"\"\n MAP_GENERATION = {\n 'aes': 'nuclear',\n 'tec': 'gas',\n 'tes': 'coal',\n- 'vde': 'wind',\n+ 'vde': 'unknown',\n 'biomass': 'biomass',\n 'gesgaes': 'hydro',\n 'solar': 'solar',\n+ 'wind': 'wind',\n 'oil': 'oil',\n 'geothermal': 'geothermal',\n }\n", "issue": "Discuss type for renewable category for Ukraine UA\nCurrently, there is no breakdown for the Ukrainian renewable category provided on https://ua.energy/diyalnist/dyspetcherska-informatsiya/dobovyj-grafik-vyrobnytstva-spozhyvannya-e-e/\r\n\r\nThe renewable category (\u0412\u0414\u0415) is mapped as \"wind\" in the parser, because wind used to be the dominant source in the past.\r\nGoing through the last few days on the website, you will notice a very clear solar peak at noon (~1.200. MW) each day. Wind at nighttimes reaches a maximum value of ~400 MW, mostly it is around 200 MW.\r\n\r\nHere is an example for yesterday:\r\n\r\n\r\nThe installed capacity of solar grew very fast, and will continue because it's cheap and the potential in UA is huge:\r\n\r\n\r\nSome suggestions to deal with this situation:\r\n1. Any artificial boundaries (depending on x-axis-time or y-axis-megawatts or both) pushing production to wind or solar?\r\nLike \"from 06:00 to 18:00\" -> solar if P > 200 MW, else wind\". \r\n2. Put renewables to unknown category with a mixed carbon intensity (looking at the intalled capacity, 50% wind : 50% solar seems reasonable).\r\n3. actively search for a breakdown of wind and solar\r\n4. 
ask the data provider for a breakdown\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport dateutil\nimport requests\n\n\"\"\"\ntec - same as `tes` but also working as central heater,\n main fuel is gas, in critical situations - black oil\ngesgaes - hydro run of river and poundage\nconsumptiongaespump - hydro pumped storage\nvde - wind + solar, mostly wind\n\nno data for biomass, solar and oil\n\"\"\"\nMAP_GENERATION = {\n 'aes': 'nuclear',\n 'tec': 'gas',\n 'tes': 'coal',\n 'vde': 'wind',\n 'biomass': 'biomass',\n 'gesgaes': 'hydro',\n 'solar': 'solar',\n 'oil': 'oil',\n 'geothermal': 'geothermal',\n}\n\nMAP_STORAGE = {\n 'consumptiongaespump': 'hydro',\n}\n\ntz = 'Europe/Kiev'\n\n\ndef fetch_production(zone_key='UA', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n r = session or requests.session()\n\n data = []\n today = arrow.now(tz=tz).format('DD.MM.YYYY')\n url = 'https://ua.energy/wp-admin/admin-ajax.php'\n postdata = {\n 'action': 'get_data_oes',\n 'report_date': today,\n 'type': 'day'\n }\n\n response = r.post(url, postdata)\n\n for serie in response.json():\n row = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'ua.energy'\n }\n\n # Storage\n if 'consumptiongaespump' in serie:\n row['storage']['hydro'] = serie['consumptiongaespump'] * -1\n\n # Production\n for k, v in MAP_GENERATION.items():\n if k in serie:\n row['production'][v] = serie[k]\n else:\n row['production'][v] = 0.0\n\n # Date\n date = arrow.get('%s %s' % (today, serie['hour']), 'DD.MM.YYYY HH:mm')\n row['datetime'] = date.replace(tzinfo=dateutil.tz.gettz(tz)).datetime\n\n data.append(row)\n return data\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/UA.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport dateutil\nimport requests\n\n\"\"\"\ntec - same as `tes` but also working as central heater,\n main fuel is gas, in critical situations - black oil\ngesgaes - hydro run of river and poundage\nconsumptiongaespump - hydro pumped storage\nvde - renewable sources - mostly wind at nighttimes and solar peaks during the day\n\n\"\"\"\nMAP_GENERATION = {\n 'aes': 'nuclear',\n 'tec': 'gas',\n 'tes': 'coal',\n 'vde': 'unknown',\n 'biomass': 'biomass',\n 'gesgaes': 'hydro',\n 'solar': 'solar',\n 'wind': 'wind',\n 'oil': 'oil',\n 'geothermal': 'geothermal',\n}\n\nMAP_STORAGE = {\n 'consumptiongaespump': 'hydro',\n}\n\ntz = 'Europe/Kiev'\n\n\ndef fetch_production(zone_key='UA', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n r = session or requests.session()\n\n data = []\n today = arrow.now(tz=tz).format('DD.MM.YYYY')\n url = 'https://ua.energy/wp-admin/admin-ajax.php'\n postdata = {\n 'action': 'get_data_oes',\n 'report_date': today,\n 'type': 'day'\n }\n\n response = r.post(url, postdata)\n\n for serie in response.json():\n row = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'ua.energy'\n }\n\n # Storage\n if 'consumptiongaespump' in serie:\n row['storage']['hydro'] = serie['consumptiongaespump'] * -1\n\n # Production\n for k, v in MAP_GENERATION.items():\n if k in serie:\n row['production'][v] = serie[k]\n else:\n row['production'][v] = 0.0\n\n # Date\n date = arrow.get('%s %s' % (today, serie['hour']), 'DD.MM.YYYY HH:mm')\n row['datetime'] = 
date.replace(tzinfo=dateutil.tz.gettz(tz)).datetime\n\n data.append(row)\n return data\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/UA.py"}]} | 1,364 | 215 |
gh_patches_debug_19934 | rasdani/github-patches | git_diff | Mailu__Mailu-1599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hardcoded http://admin/ in fetchmail.py
I've tweaked ``docker-compose.yml`` so that all my containers related to ``mailu`` are prefixed by ``mailu-``, in order to pro-actively avoid conflict with any other containers I may eventually define in future.
However, the hardcoded ``http://admin/...`` below causes failures in ``fetchmail``, since my container is now named ``mailu-admin`` in my ``docker-compose.yml``, not ``admin`` as the code assumes.
```
./services/fetchmail/fetchmail.py:47: fetches = requests.get("http://admin/internal/fetch").json()
./services/fetchmail/fetchmail.py:85: requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optional/fetchmail/fetchmail.py`
Content:
```
1 #!/usr/bin/python3
2
3 import time
4 import os
5 import tempfile
6 import shlex
7 import subprocess
8 import re
9 import requests
10 import sys
11 import traceback
12
13
14 FETCHMAIL = """
15 fetchmail -N \
16 --sslcertck --sslcertpath /etc/ssl/certs \
17 -f {}
18 """
19
20
21 RC_LINE = """
22 poll "{host}" proto {protocol} port {port}
23 user "{username}" password "{password}"
24 is "{user_email}"
25 smtphost "{smtphost}"
26 {options}
27 """
28
29
30 def extract_host_port(host_and_port, default_port):
31 host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()
32 return host, int(port) if port else default_port
33
34
35 def escape_rc_string(arg):
36 return "".join("\\x%2x" % ord(char) for char in arg)
37
38
39 def fetchmail(fetchmailrc):
40 with tempfile.NamedTemporaryFile() as handler:
41 handler.write(fetchmailrc.encode("utf8"))
42 handler.flush()
43 command = FETCHMAIL.format(shlex.quote(handler.name))
44 output = subprocess.check_output(command, shell=True)
45 return output
46
47
48 def run(debug):
49 try:
50 fetches = requests.get("http://admin/internal/fetch").json()
51 smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
52 if smtpport is None:
53 smtphostport = smtphost
54 else:
55 smtphostport = "%s/%d" % (smtphost, smtpport)
56 for fetch in fetches:
57 fetchmailrc = ""
58 options = "options antispam 501, 504, 550, 553, 554"
59 options += " ssl" if fetch["tls"] else ""
60 options += " keep" if fetch["keep"] else " fetchall"
61 fetchmailrc += RC_LINE.format(
62 user_email=escape_rc_string(fetch["user_email"]),
63 protocol=fetch["protocol"],
64 host=escape_rc_string(fetch["host"]),
65 port=fetch["port"],
66 smtphost=smtphostport,
67 username=escape_rc_string(fetch["username"]),
68 password=escape_rc_string(fetch["password"]),
69 options=options
70 )
71 if debug:
72 print(fetchmailrc)
73 try:
74 print(fetchmail(fetchmailrc))
75 error_message = ""
76 except subprocess.CalledProcessError as error:
77 error_message = error.output.decode("utf8")
78 # No mail is not an error
79 if not error_message.startswith("fetchmail: No mail"):
80 print(error_message)
81 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"])
82 # Number of messages seen is not a error as well
83 if ("messages" in error_message and
84 "(seen " in error_message and
85 user_info in error_message):
86 print(error_message)
87 finally:
88 requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
89 json=error_message.split("\n")[0]
90 )
91 except Exception:
92 traceback.print_exc()
93
94
95 if __name__ == "__main__":
96 while True:
97 time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60)))
98 run(os.environ.get("DEBUG", None) == "True")
99 sys.stdout.flush()
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -47,7 +47,7 @@
def run(debug):
try:
- fetches = requests.get("http://admin/internal/fetch").json()
+ fetches = requests.get("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch").json()
smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
if smtpport is None:
smtphostport = smtphost
@@ -85,7 +85,7 @@
user_info in error_message):
print(error_message)
finally:
- requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
+ requests.post("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch/{}".format(fetch["id"]),
json=error_message.split("\n")[0]
)
except Exception:
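
To make the effect of the patch concrete, here is a tiny, self-contained sketch of the URL construction it introduces; the helper function and the `mailu-admin` value are assumptions made for illustration only, not code from the Mailu repository.

```python
# Illustration of the patched pattern: admin host comes from HOST_ADMIN, default "admin".
import os

def admin_url(path):
    return "http://" + os.environ.get("HOST_ADMIN", "admin") + path

os.environ["HOST_ADMIN"] = "mailu-admin"      # e.g. a prefixed container name
assert admin_url("/internal/fetch") == "http://mailu-admin/internal/fetch"

os.environ.pop("HOST_ADMIN")
assert admin_url("/internal/fetch") == "http://admin/internal/fetch"   # old default kept
```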
| {"golden_diff": "diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py\n--- a/optional/fetchmail/fetchmail.py\n+++ b/optional/fetchmail/fetchmail.py\n@@ -47,7 +47,7 @@\n \n def run(debug):\n try:\n- fetches = requests.get(\"http://admin/internal/fetch\").json()\n+ fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n@@ -85,7 +85,7 @@\n user_info in error_message):\n print(error_message)\n finally:\n- requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n+ requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n", "issue": "Hardcoded http://admin/ in fetchmail.py\nI've tweaked ``docker-compose.yml`` so that all my containers related to ``mailu`` are prefixed by ``mailu-``, in order to pro-actively avoid conflict with any other containers I may eventually define in future.\r\n\r\nHowever, the hardcode ``http://admin/...`` below causes failure in ``fetchmail``, since my container is now named ``mailu-admin`` in my ``docker-compose.yml``, not ``admin`` as the code supposes it should be.\r\n\r\n```\r\n./services/fetchmail/fetchmail.py:47: fetches = requests.get(\"http://admin/internal/fetch\").json()\r\n./services/fetchmail/fetchmail.py:85: requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\r\n```\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://admin/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n 
except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == 
\"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}]} | 1,367 | 250 |
gh_patches_debug_9389 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1774 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mattermost connector is broken
# Description
The Mattermost connector is broken. It can connect to a Mattermost instance, but when sending a message to OpsDroid (using the Hello skill) you get:
```
ERROR opsdroid.core: Exception when running skill 'hello'.
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/opsdroid/core.py", line 465, in run_skill
return await skill(event)
File "/root/.local/share/opsdroid/opsdroid-modules/skill/hello/__init__.py", line 13, in hello
await message.respond(text)
File "/usr/local/lib/python3.8/site-packages/opsdroid/events.py", line 278, in respond
"thinking-delay" in self.connector.configuration
AttributeError: 'NoneType' object has no attribute 'configuration'
WARNING mattermostdriver.websocket: Failed to establish websocket connection: 'NoneType' object has no attribute 'configuration'
```
## Steps to Reproduce
Configure the Mattermost connector and the Hello skill, start Opsdroid and send a message to the bot in Mattermost.
## Expected Functionality
A reply from the Hello skill.
## Experienced Functionality
No reply, and the above error in the Opsdroid logs.
## Versions
- **Opsdroid version:** 0.22.0
- **Python version:** 3.8
- **OS/Docker version:** N/A
## Configuration File
Please include your version of the configuration file below.
```yaml
welcome-message: false
connectors:
## Mattermost (core)
mattermost:
# Required
token: "<redacted>"
url: "<redacted>"
team-name: "<redacted>"
# Optional
scheme: "https" # default: https
port: 443 # default: 8065
ssl-verify: true # default: true
connect-timeout: 30 # default: 30
skills:
## Hello (https://github.com/opsdroid/skill-hello)
hello: {}
## Seen (https://github.com/opsdroid/skill-seen)
seen: {}
```
## Additional Details
Looks like the Mattermost connector was missed in #1116 -- I'll submit a PR shortly to correct this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/connector/mattermost/__init__.py`
Content:
```
1 """A connector for Mattermost."""
2 import logging
3 import json
4
5 from mattermostdriver import Driver, Websocket
6 from voluptuous import Required
7
8 from opsdroid.connector import Connector, register_event
9 from opsdroid.events import Message
10
11 _LOGGER = logging.getLogger(__name__)
12 CONFIG_SCHEMA = {
13 Required("token"): str,
14 Required("url"): str,
15 Required("team-name"): str,
16 "scheme": str,
17 "port": int,
18 "ssl-verify": bool,
19 "connect-timeout": int,
20 }
21
22
23 class ConnectorMattermost(Connector):
24 """A connector for Mattermost."""
25
26 def __init__(self, config, opsdroid=None):
27 """Create the connector."""
28 super().__init__(config, opsdroid=opsdroid)
29 _LOGGER.debug(_("Starting Mattermost connector"))
30 self.name = "mattermost"
31 self.token = config["token"]
32 self.url = config["url"]
33 self.team_name = config["team-name"]
34 self.scheme = config.get("scheme", "https")
35 self.port = config.get("port", 8065)
36 self.verify = config.get("ssl-verify", True)
37 self.timeout = config.get("connect-timeout", 30)
38 self.request_timeout = None
39 self.mfa_token = None
40 self.debug = False
41 self.listening = True
42
43 self.mm_driver = Driver(
44 {
45 "url": self.url,
46 "token": self.token,
47 "scheme": self.scheme,
48 "port": self.port,
49 "verify": self.verify,
50 "timeout": self.timeout,
51 "request_timeout": self.request_timeout,
52 "mfa_token": self.mfa_token,
53 "debug": self.debug,
54 }
55 )
56
57 async def connect(self):
58 """Connect to the chat service."""
59 _LOGGER.info(_("Connecting to Mattermost"))
60
61 login_response = self.mm_driver.login()
62
63 _LOGGER.info(login_response)
64
65 if "id" in login_response:
66 self.bot_id = login_response["id"]
67 if "username" in login_response:
68 self.bot_name = login_response["username"]
69
70 _LOGGER.info(_("Connected as %s"), self.bot_name)
71
72 self.mm_driver.websocket = Websocket(
73 self.mm_driver.options, self.mm_driver.client.token
74 )
75
76 _LOGGER.info(_("Connected successfully"))
77
78 async def disconnect(self):
79 """Disconnect from Mattermost."""
80 self.listening = False
81 self.mm_driver.logout()
82
83 async def listen(self):
84 """Listen for and parse new messages."""
85 await self.mm_driver.websocket.connect(self.process_message)
86
87 async def process_message(self, raw_message):
88 """Process a raw message and pass it to the parser."""
89 _LOGGER.info(raw_message)
90
91 message = json.loads(raw_message)
92
93 if "event" in message and message["event"] == "posted":
94 data = message["data"]
95 post = json.loads(data["post"])
96 await self.opsdroid.parse(
97 Message(
98 post["message"],
99 data["sender_name"],
100 data["channel_name"],
101 self,
102 raw_event=message,
103 )
104 )
105
106 @register_event(Message)
107 async def send_message(self, message):
108 """Respond with a message."""
109 _LOGGER.debug(
110 _("Responding with: '%s' in room %s"), message.text, message.target
111 )
112 channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(
113 self.team_name, message.target
114 )["id"]
115 self.mm_driver.posts.create_post(
116 options={"channel_id": channel_id, "message": message.text}
117 )
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py
--- a/opsdroid/connector/mattermost/__init__.py
+++ b/opsdroid/connector/mattermost/__init__.py
@@ -95,10 +95,10 @@
post = json.loads(data["post"])
await self.opsdroid.parse(
Message(
- post["message"],
- data["sender_name"],
- data["channel_name"],
- self,
+ text=post["message"],
+ user=data["sender_name"],
+ target=data["channel_name"],
+ connector=self,
raw_event=message,
)
)
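
The reason keyword arguments fix the crash is that the `Message` constructor's positional order changed in newer opsdroid releases, so the connector object no longer landed in the `connector` slot. The stand-in class below only approximates that signature (the exact parameter order is an assumption) but reproduces the failure mode without importing opsdroid.

```python
# Stand-in for opsdroid's Message; parameter order is approximate, for illustration only.
class Message:
    def __init__(self, text=None, user_id=None, user=None, target=None,
                 connector=None, raw_event=None):
        self.text, self.user, self.target = text, user, target
        self.connector, self.raw_event = connector, raw_event

conn = object()  # pretend connector instance

old_style = Message("hello", "alice", "town-square", conn)        # positional (pre-patch)
assert old_style.connector is None   # respond() later fails on 'NoneType'.configuration

new_style = Message(text="hello", user="alice", target="town-square", connector=conn)
assert new_style.connector is conn   # keyword arguments (post-patch) pin every field
```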
| {"golden_diff": "diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py\n--- a/opsdroid/connector/mattermost/__init__.py\n+++ b/opsdroid/connector/mattermost/__init__.py\n@@ -95,10 +95,10 @@\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n- post[\"message\"],\n- data[\"sender_name\"],\n- data[\"channel_name\"],\n- self,\n+ text=post[\"message\"],\n+ user=data[\"sender_name\"],\n+ target=data[\"channel_name\"],\n+ connector=self,\n raw_event=message,\n )\n )\n", "issue": "Mattermost connector is broken\n# Description\r\nThe Mattermost connector is broken. It can connect to a Mattermost instance, but when sending a message to OpsDroid (using the Hello skill) you get:\r\n\r\n```\r\nERROR opsdroid.core: Exception when running skill 'hello'.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/opsdroid/core.py\", line 465, in run_skill\r\n return await skill(event)\r\n File \"/root/.local/share/opsdroid/opsdroid-modules/skill/hello/__init__.py\", line 13, in hello\r\n await message.respond(text)\r\n File \"/usr/local/lib/python3.8/site-packages/opsdroid/events.py\", line 278, in respond\r\n \"thinking-delay\" in self.connector.configuration\r\nAttributeError: 'NoneType' object has no attribute 'configuration'\r\nWARNING mattermostdriver.websocket: Failed to establish websocket connection: 'NoneType' object has no attribute 'configuration'\r\n```\r\n\r\n## Steps to Reproduce\r\nConfigure the Mattermost connector and the Hello skill, start Opsdroid and send a message to the bot in Mattermost.\r\n\r\n\r\n## Expected Functionality\r\nA reply form the Hello skill.\r\n\r\n\r\n## Experienced Functionality\r\nNo reply, and the above error in the Opsdroid logs.\r\n\r\n## Versions\r\n- **Opsdroid version:** 0.22.0\r\n- **Python version:** 3.8\r\n- **OS/Docker version:** N/A\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\n\r\n```yaml\r\nwelcome-message: false\r\n\r\nconnectors:\r\n ## Mattermost (core)\r\n mattermost:\r\n # Required\r\n token: \"<redacted>\"\r\n url: \"<redacted>\"\r\n team-name: \"<redacted>\"\r\n # Optional\r\n scheme: \"https\" # default: https\r\n port: 443 # default: 8065\r\n ssl-verify: true # default: true\r\n connect-timeout: 30 # default: 30\r\n\r\nskills:\r\n ## Hello (https://github.com/opsdroid/skill-hello)\r\n hello: {}\r\n ## Seen (https://github.com/opsdroid/skill-seen)\r\n seen: {}\r\n```\r\n\r\n## Additional Details\r\nLooks like this the Mattermost connector was missed in #1116 -- I'll submit a PR shortly to correct this.\n", "before_files": [{"content": "\"\"\"A connector for Mattermost.\"\"\"\nimport logging\nimport json\n\nfrom mattermostdriver import Driver, Websocket\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n Required(\"url\"): str,\n Required(\"team-name\"): str,\n \"scheme\": str,\n \"port\": int,\n \"ssl-verify\": bool,\n \"connect-timeout\": int,\n}\n\n\nclass ConnectorMattermost(Connector):\n \"\"\"A connector for Mattermost.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Mattermost connector\"))\n self.name = \"mattermost\"\n self.token = config[\"token\"]\n self.url = config[\"url\"]\n self.team_name = 
config[\"team-name\"]\n self.scheme = config.get(\"scheme\", \"https\")\n self.port = config.get(\"port\", 8065)\n self.verify = config.get(\"ssl-verify\", True)\n self.timeout = config.get(\"connect-timeout\", 30)\n self.request_timeout = None\n self.mfa_token = None\n self.debug = False\n self.listening = True\n\n self.mm_driver = Driver(\n {\n \"url\": self.url,\n \"token\": self.token,\n \"scheme\": self.scheme,\n \"port\": self.port,\n \"verify\": self.verify,\n \"timeout\": self.timeout,\n \"request_timeout\": self.request_timeout,\n \"mfa_token\": self.mfa_token,\n \"debug\": self.debug,\n }\n )\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Mattermost\"))\n\n login_response = self.mm_driver.login()\n\n _LOGGER.info(login_response)\n\n if \"id\" in login_response:\n self.bot_id = login_response[\"id\"]\n if \"username\" in login_response:\n self.bot_name = login_response[\"username\"]\n\n _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n\n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n )\n\n _LOGGER.info(_(\"Connected successfully\"))\n\n async def disconnect(self):\n \"\"\"Disconnect from Mattermost.\"\"\"\n self.listening = False\n self.mm_driver.logout()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n await self.mm_driver.websocket.connect(self.process_message)\n\n async def process_message(self, raw_message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n _LOGGER.info(raw_message)\n\n message = json.loads(raw_message)\n\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n post[\"message\"],\n data[\"sender_name\"],\n data[\"channel_name\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(\n self.team_name, message.target\n )[\"id\"]\n self.mm_driver.posts.create_post(\n options={\"channel_id\": channel_id, \"message\": message.text}\n )\n", "path": "opsdroid/connector/mattermost/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Mattermost.\"\"\"\nimport logging\nimport json\n\nfrom mattermostdriver import Driver, Websocket\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n Required(\"url\"): str,\n Required(\"team-name\"): str,\n \"scheme\": str,\n \"port\": int,\n \"ssl-verify\": bool,\n \"connect-timeout\": int,\n}\n\n\nclass ConnectorMattermost(Connector):\n \"\"\"A connector for Mattermost.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Mattermost connector\"))\n self.name = \"mattermost\"\n self.token = config[\"token\"]\n self.url = config[\"url\"]\n self.team_name = config[\"team-name\"]\n self.scheme = config.get(\"scheme\", \"https\")\n self.port = config.get(\"port\", 8065)\n self.verify = config.get(\"ssl-verify\", True)\n self.timeout = config.get(\"connect-timeout\", 30)\n self.request_timeout = None\n self.mfa_token = None\n self.debug = 
False\n self.listening = True\n\n self.mm_driver = Driver(\n {\n \"url\": self.url,\n \"token\": self.token,\n \"scheme\": self.scheme,\n \"port\": self.port,\n \"verify\": self.verify,\n \"timeout\": self.timeout,\n \"request_timeout\": self.request_timeout,\n \"mfa_token\": self.mfa_token,\n \"debug\": self.debug,\n }\n )\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Mattermost\"))\n\n login_response = self.mm_driver.login()\n\n _LOGGER.info(login_response)\n\n if \"id\" in login_response:\n self.bot_id = login_response[\"id\"]\n if \"username\" in login_response:\n self.bot_name = login_response[\"username\"]\n\n _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n\n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n )\n\n _LOGGER.info(_(\"Connected successfully\"))\n\n async def disconnect(self):\n \"\"\"Disconnect from Mattermost.\"\"\"\n self.listening = False\n self.mm_driver.logout()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n await self.mm_driver.websocket.connect(self.process_message)\n\n async def process_message(self, raw_message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n _LOGGER.info(raw_message)\n\n message = json.loads(raw_message)\n\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n text=post[\"message\"],\n user=data[\"sender_name\"],\n target=data[\"channel_name\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(\n self.team_name, message.target\n )[\"id\"]\n self.mm_driver.posts.create_post(\n options={\"channel_id\": channel_id, \"message\": message.text}\n )\n", "path": "opsdroid/connector/mattermost/__init__.py"}]} | 1,808 | 159 |
gh_patches_debug_15319 | rasdani/github-patches | git_diff | ibis-project__ibis-1816 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PKG: Add pre-commit, black and isort to setup.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import pathlib
4 import sys
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10 LONG_DESCRIPTION = """
11 Ibis is a productivity-centric Python big data framework.
12
13 See http://docs.ibis-project.org
14 """
15
16 VERSION = sys.version_info.major, sys.version_info.minor
17
18 impala_requires = ['hdfs>=2.0.16', 'sqlalchemy', 'requests']
19 if VERSION == (3, 5):
20 impala_requires.append('impyla<0.14.2')
21 else:
22 impala_requires.append('impyla>=0.15.0')
23
24 sqlite_requires = ['sqlalchemy']
25 postgres_requires = sqlite_requires + ['psycopg2']
26 mysql_requires = sqlite_requires + ['pymysql']
27
28 if VERSION == (3, 5):
29 mapd_requires = ['pymapd>=0.8.3,<0.11.0']
30 else:
31 mapd_requires = ['pymapd>=0.12.0']
32 kerberos_requires = ['requests-kerberos']
33 visualization_requires = ['graphviz']
34 clickhouse_requires = ['clickhouse-driver>=0.0.8', 'clickhouse-cityhash']
35 bigquery_requires = ['google-cloud-bigquery>=1.0.0', 'pydata-google-auth']
36 hdf5_requires = ['tables>=3.0.0']
37
38 if VERSION == (3, 5):
39 parquet_requires = ['pyarrow<0.12.0']
40 else:
41 parquet_requires = ['pyarrow>=0.12.0']
42
43 all_requires = (
44 impala_requires
45 + postgres_requires
46 + mapd_requires
47 + mysql_requires
48 + kerberos_requires
49 + visualization_requires
50 + clickhouse_requires
51 + bigquery_requires
52 + hdf5_requires
53 + parquet_requires
54 )
55
56 develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']
57
58 install_requires = [
59 line.strip()
60 for line in pathlib.Path(__file__)
61 .parent.joinpath('requirements.txt')
62 .read_text()
63 .splitlines()
64 ]
65
66 setup(
67 name='ibis-framework',
68 url='https://github.com/ibis-project/ibis',
69 packages=find_packages(),
70 version=versioneer.get_version(),
71 cmdclass=versioneer.get_cmdclass(),
72 install_requires=install_requires,
73 python_requires='>=3.5',
74 extras_require={
75 'all': all_requires,
76 'develop': develop_requires,
77 'impala': impala_requires,
78 'kerberos': kerberos_requires,
79 'postgres': postgres_requires,
80 'mapd': mapd_requires,
81 'mysql': mysql_requires,
82 'sqlite': sqlite_requires,
83 'visualization': visualization_requires,
84 'clickhouse': clickhouse_requires,
85 'bigquery': bigquery_requires,
86 'hdf5': hdf5_requires,
87 'parquet': parquet_requires,
88 },
89 description="Productivity-centric Python Big Data Framework",
90 long_description=LONG_DESCRIPTION,
91 classifiers=[
92 'Development Status :: 4 - Beta',
93 'Operating System :: OS Independent',
94 'Intended Audience :: Science/Research',
95 'Programming Language :: Python',
96 'Programming Language :: Python :: 3',
97 'Topic :: Scientific/Engineering',
98 ],
99 license='Apache License, Version 2.0',
100 maintainer="Phillip Cloud",
101 maintainer_email="[email protected]",
102 )
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,14 @@
+ parquet_requires
)
-develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']
+develop_requires = all_requires + [
+ 'click',
+ 'flake8',
+ 'isort',
+ 'mypy',
+ 'pre-commit',
+ 'pytest>=3',
+]
install_requires = [
line.strip()
@@ -73,7 +80,8 @@
python_requires='>=3.5',
extras_require={
'all': all_requires,
- 'develop': develop_requires,
+ 'develop:python_version > "3.5"': develop_requires + ['black'],
+ 'develop:python_version == "3.5"': develop_requires,
'impala': impala_requires,
'kerberos': kerberos_requires,
'postgres': postgres_requires,
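
For reference, a condensed view of how the marker-keyed extras behave at install time; the lists are abridged (the real ones also include all backend requirements) and the pip command shown is the standard extras syntax rather than something quoted from the ibis docs.

```python
# Abridged sketch of the patched extras_require (illustrative, not the full lists).
extras_require = {
    'develop:python_version > "3.5"': ['black', 'click', 'flake8', 'isort',
                                       'mypy', 'pre-commit', 'pytest>=3'],
    'develop:python_version == "3.5"': ['click', 'flake8', 'isort',
                                        'mypy', 'pre-commit', 'pytest>=3'],
}
# `pip install "ibis-framework[develop]"` evaluates the environment marker against the
# running interpreter, so Python > 3.5 additionally pulls in black, while 3.5 does not.
```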
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,14 @@\n + parquet_requires\n )\n \n-develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']\n+develop_requires = all_requires + [\n+ 'click',\n+ 'flake8',\n+ 'isort',\n+ 'mypy',\n+ 'pre-commit',\n+ 'pytest>=3',\n+]\n \n install_requires = [\n line.strip()\n@@ -73,7 +80,8 @@\n python_requires='>=3.5',\n extras_require={\n 'all': all_requires,\n- 'develop': develop_requires,\n+ 'develop:python_version > \"3.5\"': develop_requires + ['black'],\n+ 'develop:python_version == \"3.5\"': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n", "issue": "PKG: Add pre-commit, black and isort to setup.py\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport pathlib\nimport sys\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"\nIbis is a productivity-centric Python big data framework.\n\nSee http://docs.ibis-project.org\n\"\"\"\n\nVERSION = sys.version_info.major, sys.version_info.minor\n\nimpala_requires = ['hdfs>=2.0.16', 'sqlalchemy', 'requests']\nif VERSION == (3, 5):\n impala_requires.append('impyla<0.14.2')\nelse:\n impala_requires.append('impyla>=0.15.0')\n\nsqlite_requires = ['sqlalchemy']\npostgres_requires = sqlite_requires + ['psycopg2']\nmysql_requires = sqlite_requires + ['pymysql']\n\nif VERSION == (3, 5):\n mapd_requires = ['pymapd>=0.8.3,<0.11.0']\nelse:\n mapd_requires = ['pymapd>=0.12.0']\nkerberos_requires = ['requests-kerberos']\nvisualization_requires = ['graphviz']\nclickhouse_requires = ['clickhouse-driver>=0.0.8', 'clickhouse-cityhash']\nbigquery_requires = ['google-cloud-bigquery>=1.0.0', 'pydata-google-auth']\nhdf5_requires = ['tables>=3.0.0']\n\nif VERSION == (3, 5):\n parquet_requires = ['pyarrow<0.12.0']\nelse:\n parquet_requires = ['pyarrow>=0.12.0']\n\nall_requires = (\n impala_requires\n + postgres_requires\n + mapd_requires\n + mysql_requires\n + kerberos_requires\n + visualization_requires\n + clickhouse_requires\n + bigquery_requires\n + hdf5_requires\n + parquet_requires\n)\n\ndevelop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']\n\ninstall_requires = [\n line.strip()\n for line in pathlib.Path(__file__)\n .parent.joinpath('requirements.txt')\n .read_text()\n .splitlines()\n]\n\nsetup(\n name='ibis-framework',\n url='https://github.com/ibis-project/ibis',\n packages=find_packages(),\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n install_requires=install_requires,\n python_requires='>=3.5',\n extras_require={\n 'all': all_requires,\n 'develop': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n 'mapd': mapd_requires,\n 'mysql': mysql_requires,\n 'sqlite': sqlite_requires,\n 'visualization': visualization_requires,\n 'clickhouse': clickhouse_requires,\n 'bigquery': bigquery_requires,\n 'hdf5': hdf5_requires,\n 'parquet': parquet_requires,\n },\n description=\"Productivity-centric Python Big Data Framework\",\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n ],\n license='Apache License, Version 2.0',\n maintainer=\"Phillip Cloud\",\n maintainer_email=\"[email protected]\",\n)\n", "path": 
"setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport pathlib\nimport sys\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"\nIbis is a productivity-centric Python big data framework.\n\nSee http://docs.ibis-project.org\n\"\"\"\n\nVERSION = sys.version_info.major, sys.version_info.minor\n\nimpala_requires = ['hdfs>=2.0.16', 'sqlalchemy', 'requests']\nif VERSION == (3, 5):\n impala_requires.append('impyla<0.14.2')\nelse:\n impala_requires.append('impyla>=0.15.0')\n\nsqlite_requires = ['sqlalchemy']\npostgres_requires = sqlite_requires + ['psycopg2']\nmysql_requires = sqlite_requires + ['pymysql']\n\nif VERSION == (3, 5):\n mapd_requires = ['pymapd>=0.8.3,<0.11.0']\nelse:\n mapd_requires = ['pymapd>=0.12.0']\nkerberos_requires = ['requests-kerberos']\nvisualization_requires = ['graphviz']\nclickhouse_requires = ['clickhouse-driver>=0.0.8', 'clickhouse-cityhash']\nbigquery_requires = ['google-cloud-bigquery>=1.0.0', 'pydata-google-auth']\nhdf5_requires = ['tables>=3.0.0']\n\nif VERSION == (3, 5):\n parquet_requires = ['pyarrow<0.12.0']\nelse:\n parquet_requires = ['pyarrow>=0.12.0']\n\nall_requires = (\n impala_requires\n + postgres_requires\n + mapd_requires\n + mysql_requires\n + kerberos_requires\n + visualization_requires\n + clickhouse_requires\n + bigquery_requires\n + hdf5_requires\n + parquet_requires\n)\n\ndevelop_requires = all_requires + [\n 'click',\n 'flake8',\n 'isort',\n 'mypy',\n 'pre-commit',\n 'pytest>=3',\n]\n\ninstall_requires = [\n line.strip()\n for line in pathlib.Path(__file__)\n .parent.joinpath('requirements.txt')\n .read_text()\n .splitlines()\n]\n\nsetup(\n name='ibis-framework',\n url='https://github.com/ibis-project/ibis',\n packages=find_packages(),\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n install_requires=install_requires,\n python_requires='>=3.5',\n extras_require={\n 'all': all_requires,\n 'develop:python_version > \"3.5\"': develop_requires + ['black'],\n 'develop:python_version == \"3.5\"': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n 'mapd': mapd_requires,\n 'mysql': mysql_requires,\n 'sqlite': sqlite_requires,\n 'visualization': visualization_requires,\n 'clickhouse': clickhouse_requires,\n 'bigquery': bigquery_requires,\n 'hdf5': hdf5_requires,\n 'parquet': parquet_requires,\n },\n description=\"Productivity-centric Python Big Data Framework\",\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n ],\n license='Apache License, Version 2.0',\n maintainer=\"Phillip Cloud\",\n maintainer_email=\"[email protected]\",\n)\n", "path": "setup.py"}]} | 1,220 | 220 |
gh_patches_debug_20919 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-298 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Report error requests on Pyramid
Currently, if an error occurs in a Pyramid request, we don't report it. Error cases can be just as useful to see, so we should try to do this. It looks like it's possible by rearranging the existing code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/pyramid.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import scout_apm.core
5 from scout_apm.core.config import ScoutConfig
6 from scout_apm.core.tracked_request import TrackedRequest
7 from scout_apm.core.web_requests import (
8 create_filtered_path,
9 ignore_path,
10 track_amazon_request_queue_time,
11 track_request_queue_time,
12 )
13
14
15 def includeme(config):
16 configs = {}
17 pyramid_config = config.get_settings()
18 for name in pyramid_config:
19 if name.startswith("SCOUT_"):
20 value = pyramid_config[name]
21 clean_name = name.replace("SCOUT_", "").lower()
22 configs[clean_name] = value
23 ScoutConfig.set(**configs)
24
25 if scout_apm.core.install():
26 config.add_tween("scout_apm.pyramid.instruments")
27
28
29 def instruments(handler, registry):
30 def scout_tween(request):
31 tracked_request = TrackedRequest.instance()
32 span = tracked_request.start_span(operation="Controller/Pyramid")
33
34 try:
35 path = request.path
36 # mixed() returns values as *either* single items or lists
37 url_params = [
38 (k, v) for k, vs in request.GET.dict_of_lists().items() for v in vs
39 ]
40 tracked_request.tag("path", create_filtered_path(path, url_params))
41 if ignore_path(path):
42 tracked_request.tag("ignore_transaction", True)
43
44 try:
45 # Determine a remote IP to associate with the request. The value is
46 # spoofable by the requester so this is not suitable to use in any
47 # security sensitive context.
48 user_ip = (
49 request.headers.get("x-forwarded-for", default="").split(",")[0]
50 or request.headers.get("client-ip", default="").split(",")[0]
51 or request.remote_addr
52 )
53 except Exception:
54 pass
55 else:
56 tracked_request.tag("user_ip", user_ip)
57
58 tracked_queue_time = False
59 try:
60 queue_time = request.headers.get(
61 "x-queue-start", default=""
62 ) or request.headers.get("x-request-start", default="")
63 except Exception:
64 pass
65 else:
66 tracked_queue_time = track_request_queue_time(
67 queue_time, tracked_request
68 )
69 if not tracked_queue_time:
70 try:
71 amazon_queue_time = request.headers.get(
72 "x-amzn-trace-id", default=""
73 )
74 except Exception:
75 pass
76 else:
77 track_amazon_request_queue_time(amazon_queue_time, tracked_request)
78
79 try:
80 response = handler(request)
81 except Exception:
82 tracked_request.tag("error", "true")
83 raise
84
85 # This happens further down the call chain. So time it starting
86 # above, but only name it if it gets to here.
87 if request.matched_route is not None:
88 tracked_request.mark_real_request()
89 span.operation = "Controller/" + request.matched_route.name
90
91 finally:
92 tracked_request.stop_span()
93
94 return response
95
96 return scout_tween
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/pyramid.py b/src/scout_apm/pyramid.py
--- a/src/scout_apm/pyramid.py
+++ b/src/scout_apm/pyramid.py
@@ -77,17 +77,18 @@
track_amazon_request_queue_time(amazon_queue_time, tracked_request)
try:
- response = handler(request)
+ try:
+ response = handler(request)
+ finally:
+ # Routing further down the call chain. So time it starting
+ # above, but only name it if it gets a name
+ if request.matched_route is not None:
+ tracked_request.mark_real_request()
+ span.operation = "Controller/" + request.matched_route.name
except Exception:
tracked_request.tag("error", "true")
raise
- # This happens further down the call chain. So time it starting
- # above, but only name it if it gets to here.
- if request.matched_route is not None:
- tracked_request.mark_real_request()
- span.operation = "Controller/" + request.matched_route.name
-
finally:
tracked_request.stop_span()
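
A compact way to see what the reshuffled `try`/`finally`/`except` buys: even when the handler raises, the matched route still names the span before the error tag is applied. The tracker and handler below are stubs invented for this sketch, not scout_apm or Pyramid objects.

```python
# Stub reproduction of the patched tween's control flow (illustrative only).
class Tracker:
    def __init__(self):
        self.tags, self.operation = {}, "Controller/Pyramid"

def tween(tracker, handler, matched_route):
    try:
        try:
            return handler()
        finally:
            if matched_route is not None:           # naming happens even on error
                tracker.operation = "Controller/" + matched_route
    except Exception:
        tracker.tags["error"] = "true"              # error requests are now reported
        raise

def failing_handler():
    raise RuntimeError("boom")

tracker = Tracker()
try:
    tween(tracker, failing_handler, "home")
except RuntimeError:
    pass

assert tracker.operation == "Controller/home"
assert tracker.tags.get("error") == "true"
```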
| {"golden_diff": "diff --git a/src/scout_apm/pyramid.py b/src/scout_apm/pyramid.py\n--- a/src/scout_apm/pyramid.py\n+++ b/src/scout_apm/pyramid.py\n@@ -77,17 +77,18 @@\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n \n try:\n- response = handler(request)\n+ try:\n+ response = handler(request)\n+ finally:\n+ # Routing further down the call chain. So time it starting\n+ # above, but only name it if it gets a name\n+ if request.matched_route is not None:\n+ tracked_request.mark_real_request()\n+ span.operation = \"Controller/\" + request.matched_route.name\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n \n- # This happens further down the call chain. So time it starting\n- # above, but only name it if it gets to here.\n- if request.matched_route is not None:\n- tracked_request.mark_real_request()\n- span.operation = \"Controller/\" + request.matched_route.name\n-\n finally:\n tracked_request.stop_span()\n", "issue": "Report error requests on Pyramid\nCurrently if an error occurs in a Pyramid request, we don't report it. Error cases can be just as useful to see so we should try and do this. It looks like it's possible by rearranging the existing code.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport scout_apm.core\nfrom scout_apm.core.config import ScoutConfig\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\n\ndef includeme(config):\n configs = {}\n pyramid_config = config.get_settings()\n for name in pyramid_config:\n if name.startswith(\"SCOUT_\"):\n value = pyramid_config[name]\n clean_name = name.replace(\"SCOUT_\", \"\").lower()\n configs[clean_name] = value\n ScoutConfig.set(**configs)\n\n if scout_apm.core.install():\n config.add_tween(\"scout_apm.pyramid.instruments\")\n\n\ndef instruments(handler, registry):\n def scout_tween(request):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Controller/Pyramid\")\n\n try:\n path = request.path\n # mixed() returns values as *either* single items or lists\n url_params = [\n (k, v) for k, vs in request.GET.dict_of_lists().items() for v in vs\n ]\n tracked_request.tag(\"path\", create_filtered_path(path, url_params))\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. 
The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or request.remote_addr\n )\n except Exception:\n pass\n else:\n tracked_request.tag(\"user_ip\", user_ip)\n\n tracked_queue_time = False\n try:\n queue_time = request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or request.headers.get(\"x-request-start\", default=\"\")\n except Exception:\n pass\n else:\n tracked_queue_time = track_request_queue_time(\n queue_time, tracked_request\n )\n if not tracked_queue_time:\n try:\n amazon_queue_time = request.headers.get(\n \"x-amzn-trace-id\", default=\"\"\n )\n except Exception:\n pass\n else:\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n\n try:\n response = handler(request)\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n\n # This happens further down the call chain. So time it starting\n # above, but only name it if it gets to here.\n if request.matched_route is not None:\n tracked_request.mark_real_request()\n span.operation = \"Controller/\" + request.matched_route.name\n\n finally:\n tracked_request.stop_span()\n\n return response\n\n return scout_tween\n", "path": "src/scout_apm/pyramid.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport scout_apm.core\nfrom scout_apm.core.config import ScoutConfig\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\n\ndef includeme(config):\n configs = {}\n pyramid_config = config.get_settings()\n for name in pyramid_config:\n if name.startswith(\"SCOUT_\"):\n value = pyramid_config[name]\n clean_name = name.replace(\"SCOUT_\", \"\").lower()\n configs[clean_name] = value\n ScoutConfig.set(**configs)\n\n if scout_apm.core.install():\n config.add_tween(\"scout_apm.pyramid.instruments\")\n\n\ndef instruments(handler, registry):\n def scout_tween(request):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Controller/Pyramid\")\n\n try:\n path = request.path\n # mixed() returns values as *either* single items or lists\n url_params = [\n (k, v) for k, vs in request.GET.dict_of_lists().items() for v in vs\n ]\n tracked_request.tag(\"path\", create_filtered_path(path, url_params))\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. 
The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or request.remote_addr\n )\n except Exception:\n pass\n else:\n tracked_request.tag(\"user_ip\", user_ip)\n\n tracked_queue_time = False\n try:\n queue_time = request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or request.headers.get(\"x-request-start\", default=\"\")\n except Exception:\n pass\n else:\n tracked_queue_time = track_request_queue_time(\n queue_time, tracked_request\n )\n if not tracked_queue_time:\n try:\n amazon_queue_time = request.headers.get(\n \"x-amzn-trace-id\", default=\"\"\n )\n except Exception:\n pass\n else:\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n\n try:\n try:\n response = handler(request)\n finally:\n # Routing further down the call chain. So time it starting\n # above, but only name it if it gets a name\n if request.matched_route is not None:\n tracked_request.mark_real_request()\n span.operation = \"Controller/\" + request.matched_route.name\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n\n finally:\n tracked_request.stop_span()\n\n return response\n\n return scout_tween\n", "path": "src/scout_apm/pyramid.py"}]} | 1,153 | 255 |
gh_patches_debug_17705 | rasdani/github-patches | git_diff | open-mmlab__mmsegmentation-260 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
About the newly added RandomRotate
It looks like this transform was never imported in the pipeline's __init__.py, so it currently cannot be used.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmseg/datasets/pipelines/__init__.py`
Content:
```
1 from .compose import Compose
2 from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
3 Transpose, to_tensor)
4 from .loading import LoadAnnotations, LoadImageFromFile
5 from .test_time_aug import MultiScaleFlipAug
6 from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,
7 RandomFlip, Resize, SegRescale)
8
9 __all__ = [
10 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
11 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
12 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
13 'Normalize', 'SegRescale', 'PhotoMetricDistortion'
14 ]
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmseg/datasets/pipelines/__init__.py b/mmseg/datasets/pipelines/__init__.py
--- a/mmseg/datasets/pipelines/__init__.py
+++ b/mmseg/datasets/pipelines/__init__.py
@@ -4,11 +4,13 @@
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,
- RandomFlip, Resize, SegRescale)
+ RandomFlip, RandomRotate, Rerange, Resize, RGB2Gray,
+ SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
- 'Normalize', 'SegRescale', 'PhotoMetricDistortion'
+ 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
+ 'Rerange', 'RGB2Gray'
]
| {"golden_diff": "diff --git a/mmseg/datasets/pipelines/__init__.py b/mmseg/datasets/pipelines/__init__.py\n--- a/mmseg/datasets/pipelines/__init__.py\n+++ b/mmseg/datasets/pipelines/__init__.py\n@@ -4,11 +4,13 @@\n from .loading import LoadAnnotations, LoadImageFromFile\n from .test_time_aug import MultiScaleFlipAug\n from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,\n- RandomFlip, Resize, SegRescale)\n+ RandomFlip, RandomRotate, Rerange, Resize, RGB2Gray,\n+ SegRescale)\n \n __all__ = [\n 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',\n 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n- 'Normalize', 'SegRescale', 'PhotoMetricDistortion'\n+ 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',\n+ 'Rerange', 'RGB2Gray'\n ]\n", "issue": "\u5173\u4e8e\u65b0\u589e\u7684RandomRotate\n\u597d\u50cfpipeline\u7684__init__.py\u91cc\u9762\u5fd8\u8bb0\u5bfc\u5165\u8fd9\u4e2a\u53d8\u6362\u4e86\uff0c\u5bfc\u81f4\u73b0\u5728\u65e0\u6cd5\u4f7f\u7528\u3002\n", "before_files": [{"content": "from .compose import Compose\nfrom .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,\n Transpose, to_tensor)\nfrom .loading import LoadAnnotations, LoadImageFromFile\nfrom .test_time_aug import MultiScaleFlipAug\nfrom .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,\n RandomFlip, Resize, SegRescale)\n\n__all__ = [\n 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',\n 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n 'Normalize', 'SegRescale', 'PhotoMetricDistortion'\n]\n", "path": "mmseg/datasets/pipelines/__init__.py"}], "after_files": [{"content": "from .compose import Compose\nfrom .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,\n Transpose, to_tensor)\nfrom .loading import LoadAnnotations, LoadImageFromFile\nfrom .test_time_aug import MultiScaleFlipAug\nfrom .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,\n RandomFlip, RandomRotate, Rerange, Resize, RGB2Gray,\n SegRescale)\n\n__all__ = [\n 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',\n 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',\n 'Rerange', 'RGB2Gray'\n]\n", "path": "mmseg/datasets/pipelines/__init__.py"}]} | 473 | 252 |
gh_patches_debug_2920 | rasdani/github-patches | git_diff | encode__starlette-195 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check directory exists when instantiating `StaticFiles`
The `StaticFiles` application should ensure that the directory exists at the point it is instantiated.
(With an optional switch to turn this behavior off)
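A short sketch of the intended construction-time check; the `check_dir` flag name follows the patch further below, and the directory names are placeholders:
```python
import os

from starlette.staticfiles import StaticFiles

os.makedirs("static", exist_ok=True)
static_app = StaticFiles(directory="static")  # OK: the directory exists

try:
    StaticFiles(directory="does-not-exist")  # fails fast at construction time
except RuntimeError as exc:
    print(exc)

# Optional escape hatch for directories that are only created later at runtime.
lazy_app = StaticFiles(directory="does-not-exist", check_dir=False)
```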
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/staticfiles.py`
Content:
```
1 import os
2 import stat
3
4 from aiofiles.os import stat as aio_stat
5
6 from starlette.responses import FileResponse, PlainTextResponse, Response
7 from starlette.types import ASGIInstance, Receive, Scope, Send
8
9
10 class StaticFiles:
11 def __init__(self, *, directory: str) -> None:
12 self.directory = directory
13 self.config_checked = False
14
15 def __call__(self, scope: Scope) -> ASGIInstance:
16 assert scope["type"] == "http"
17 if scope["method"] not in ("GET", "HEAD"):
18 return PlainTextResponse("Method Not Allowed", status_code=405)
19 path = os.path.normpath(os.path.join(*scope["path"].split("/")))
20 if path.startswith(".."):
21 return PlainTextResponse("Not Found", status_code=404)
22 path = os.path.join(self.directory, path)
23 if self.config_checked:
24 check_directory = None
25 else:
26 check_directory = self.directory
27 self.config_checked = True
28 return _StaticFilesResponder(scope, path=path, check_directory=check_directory)
29
30
31 class _StaticFilesResponder:
32 def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:
33 self.scope = scope
34 self.path = path
35 self.check_directory = check_directory
36
37 async def check_directory_configured_correctly(self) -> None:
38 """
39 Perform a one-off configuration check that StaticFiles is actually
40 pointed at a directory, so that we can raise loud errors rather than
41 just returning 404 responses.
42 """
43 directory = self.check_directory
44 try:
45 stat_result = await aio_stat(directory)
46 except FileNotFoundError:
47 raise RuntimeError("StaticFiles directory '%s' does not exist." % directory)
48 if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
49 raise RuntimeError("StaticFiles path '%s' is not a directory." % directory)
50
51 async def __call__(self, receive: Receive, send: Send) -> None:
52 if self.check_directory is not None:
53 await self.check_directory_configured_correctly()
54
55 try:
56 stat_result = await aio_stat(self.path)
57 except FileNotFoundError:
58 response = PlainTextResponse("Not Found", status_code=404) # type: Response
59 else:
60 mode = stat_result.st_mode
61 if not stat.S_ISREG(mode):
62 response = PlainTextResponse("Not Found", status_code=404)
63 else:
64 response = FileResponse(self.path, stat_result=stat_result)
65
66 await response(receive, send)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -8,7 +8,9 @@
class StaticFiles:
- def __init__(self, *, directory: str) -> None:
+ def __init__(self, *, directory: str, check_dir: bool = True) -> None:
+ if check_dir and not os.path.isdir(directory):
+ raise RuntimeError("Directory '%s' does not exist" % directory)
self.directory = directory
self.config_checked = False
| {"golden_diff": "diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py\n--- a/starlette/staticfiles.py\n+++ b/starlette/staticfiles.py\n@@ -8,7 +8,9 @@\n \n \n class StaticFiles:\n- def __init__(self, *, directory: str) -> None:\n+ def __init__(self, *, directory: str, check_dir: bool = True) -> None:\n+ if check_dir and not os.path.isdir(directory):\n+ raise RuntimeError(\"Directory '%s' does not exist\" % directory)\n self.directory = directory\n self.config_checked = False\n", "issue": "Check directory exists when instantiating `StaticFiles`\nThe `StaticFiles` application should ensure that the directory exists at the point it is instantiated.\r\n\r\n(With an optional switch to turn this behavior off)\n", "before_files": [{"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import FileResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str) -> None:\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n", "path": "starlette/staticfiles.py"}], "after_files": [{"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import FileResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str, check_dir: bool = True) -> None:\n if check_dir and not os.path.isdir(directory):\n raise RuntimeError(\"Directory '%s' does not 
exist\" % directory)\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n", "path": "starlette/staticfiles.py"}]} | 995 | 127 |
gh_patches_debug_3809 | rasdani/github-patches | git_diff | saleor__saleor-1155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update_index not working with Elasticsearch 5.4
When running `python manage.py update_index`, the following error occurs:
```
elasticsearch.exceptions.RequestError: TransportError(400, 'No handler found for uri [//storefront__userprofile_user] and method [DELETE]')
```
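For orientation, a hedged sketch of the hit-parsing change made by the patch below; the response dict is abbreviated and its exact shape is an assumption:
```python
# Abbreviated Elasticsearch search hit when the primary key is returned in _source.
hit = {"_id": "42", "_type": "product_product", "_score": 1.3, "_source": {"pk": 42}}

# Old code expected a stored-fields style payload:
#     hit_pk = hit["fields"]["pk"][0]    # KeyError when "fields" is absent
# New code reads the primary key from _source instead:
hit_pk = hit["_source"]["pk"]
assert hit_pk == 42
```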
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/search/backends/dashboard.py`
Content:
```
1 from collections import defaultdict
2
3 from . import get_search_backend
4 from .base import BaseSearchQuery
5 from ..index import get_indexed_models
6
7 CONTENT_TYPES_MAP = {
8 model.indexed_get_content_type(): model
9 for model in get_indexed_models()}
10
11 DEFAULT_BACKEND = get_search_backend('default')
12 DEFAULT_BACKEND_CLASS = DEFAULT_BACKEND.__class__
13 DEFAULT_BACKEND_RESULTS_CLASS = DEFAULT_BACKEND.results_class
14
15
16 class DashboardSearchQuery(BaseSearchQuery):
17 """
18 Query that will search in multiple indexes
19 """
20
21 def __init__(self, query_string,
22 fields=None, operator=None, order_by_relevance=True,
23 queryset_map=None):
24 if queryset_map:
25 queryset_map = {model.indexed_get_content_type(): queryset
26 for model, queryset in queryset_map.items()}
27 else:
28 queryset_map = {content_type: model.objects.all()
29 for content_type, model in CONTENT_TYPES_MAP.items()}
30 self.queryset_map = queryset_map
31 super(DashboardSearchQuery, self).__init__(
32 query_string=query_string, queryset=None, fields=fields,
33 operator=operator, order_by_relevance=order_by_relevance)
34
35 def get_inner_query(self):
36 if self.query_string is not None:
37 fields = self.fields or ['_all', '_partials']
38
39 if len(fields) == 1:
40 if self.operator == 'or':
41 query = {
42 'match': {
43 fields[0]: self.query_string,
44 }
45 }
46 else:
47 query = {
48 'match': {
49 fields[0]: {
50 'query': self.query_string,
51 'operator': self.operator,
52 }
53 }
54 }
55 else:
56 query = {
57 'multi_match': {
58 'query': self.query_string,
59 'fields': fields,
60 }
61 }
62
63 if self.operator != 'or':
64 query['multi_match']['operator'] = self.operator
65 else:
66 query = {
67 'match_all': {}
68 }
69
70 return query
71
72 def get_query(self):
73 return self.get_inner_query()
74
75
76 class DashboardSearchResults(DEFAULT_BACKEND_RESULTS_CLASS):
77
78 def _do_search(self):
79 # Params for elasticsearch query
80 params = dict(
81 body=self._get_es_body(),
82 _source=False,
83 from_=self.start,
84 index='{}*'.format(self.backend.get_index().name)
85 )
86 params[self.fields_param_name] = 'pk'
87
88 # Add size if set
89 if self.stop is not None:
90 params['size'] = self.stop - self.start
91 # Send to Elasticsearch
92 hits = self.backend.es.search(**params)
93 search_hits = defaultdict(list)
94 scores = {}
95 for hit in hits['hits']['hits']:
96 hit_type = hit['_type']
97 hit_pk = hit['fields']['pk'][0]
98 search_hits[hit_type].append(hit_pk)
99 scores[hit['_id']] = hit['_score']
100
101 # Group results by content type
102 results_by_model = {}
103 for content_type, hit_pks in search_hits.items():
104 queryset = self.query.queryset_map[content_type]
105 results_by_model[content_type] = queryset.filter(pk__in=hit_pks)
106
107 # Merge results back in one list ordered by search score
108 all_results = []
109 for content_type, hits in results_by_model.items():
110 for hit in hits:
111 score_key = '%s:%d' % (content_type, hit.pk)
112 setattr(hit, 'search_score', scores[score_key])
113 setattr(hit, 'content_type', content_type)
114 all_results.append(hit)
115 sorted_results = sorted(
116 all_results, key=lambda h: h.search_score, reverse=True)
117 return list(sorted_results)
118
119 def _get_es_body(self, for_count=False):
120 body = {
121 'query': self.query.get_query()
122 }
123
124 if not for_count:
125 sort = None
126
127 if sort is not None:
128 body['sort'] = sort
129
130 return body
131
132 def _do_count(self):
133 # Get count
134 hit_count = self.backend.es.count(
135 body=self._get_es_body(for_count=True),
136 index='{}*'.format(self.backend.get_index().name)
137 )['count']
138 # Add limits
139 hit_count -= self.start
140 if self.stop is not None:
141 hit_count = min(hit_count, self.stop - self.start)
142
143 return max(hit_count, 0)
144
145
146 class DashboardMultiTypeSearchBackend(DEFAULT_BACKEND_CLASS):
147 results_class = DashboardSearchResults
148 query_class = DashboardSearchQuery
149
150 def search(self, query_string,
151 model_or_queryset=None, fields=None, filters=None,
152 prefetch_related=None, operator=None, order_by_relevance=True,
153 queryset_map=None):
154 """
155 Multi-model search. Parameters that affect model or database
156 structure are skipped and not used in dashboard query implementation.
157 """
158 search_query = self.query_class(
159 query_string=query_string, fields=fields, operator=operator,
160 order_by_relevance=order_by_relevance, queryset_map=queryset_map)
161 return self.results_class(self, search_query)
162
163 SearchBackend = DashboardMultiTypeSearchBackend
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/search/backends/dashboard.py b/saleor/search/backends/dashboard.py
--- a/saleor/search/backends/dashboard.py
+++ b/saleor/search/backends/dashboard.py
@@ -94,7 +94,7 @@
scores = {}
for hit in hits['hits']['hits']:
hit_type = hit['_type']
- hit_pk = hit['fields']['pk'][0]
+ hit_pk = hit['_source']['pk']
search_hits[hit_type].append(hit_pk)
scores[hit['_id']] = hit['_score']
| {"golden_diff": "diff --git a/saleor/search/backends/dashboard.py b/saleor/search/backends/dashboard.py\n--- a/saleor/search/backends/dashboard.py\n+++ b/saleor/search/backends/dashboard.py\n@@ -94,7 +94,7 @@\n scores = {}\n for hit in hits['hits']['hits']:\n hit_type = hit['_type']\n- hit_pk = hit['fields']['pk'][0]\n+ hit_pk = hit['_source']['pk']\n search_hits[hit_type].append(hit_pk)\n scores[hit['_id']] = hit['_score']\n", "issue": "update_index not working with Elasticsearch 5.4\nWhen running `python manage.py update_index` the following errors occurs:\r\n```\r\nelasticsearch.exceptions.RequestError: TransportError(400, 'No handler found for uri [//storefront__userprofile_user] and method [DELETE]')\r\n```\n", "before_files": [{"content": "from collections import defaultdict\n\nfrom . import get_search_backend\nfrom .base import BaseSearchQuery\nfrom ..index import get_indexed_models\n\nCONTENT_TYPES_MAP = {\n model.indexed_get_content_type(): model\n for model in get_indexed_models()}\n\nDEFAULT_BACKEND = get_search_backend('default')\nDEFAULT_BACKEND_CLASS = DEFAULT_BACKEND.__class__\nDEFAULT_BACKEND_RESULTS_CLASS = DEFAULT_BACKEND.results_class\n\n\nclass DashboardSearchQuery(BaseSearchQuery):\n \"\"\"\n Query that will search in multiple indexes\n \"\"\"\n\n def __init__(self, query_string,\n fields=None, operator=None, order_by_relevance=True,\n queryset_map=None):\n if queryset_map:\n queryset_map = {model.indexed_get_content_type(): queryset\n for model, queryset in queryset_map.items()}\n else:\n queryset_map = {content_type: model.objects.all()\n for content_type, model in CONTENT_TYPES_MAP.items()}\n self.queryset_map = queryset_map\n super(DashboardSearchQuery, self).__init__(\n query_string=query_string, queryset=None, fields=fields,\n operator=operator, order_by_relevance=order_by_relevance)\n\n def get_inner_query(self):\n if self.query_string is not None:\n fields = self.fields or ['_all', '_partials']\n\n if len(fields) == 1:\n if self.operator == 'or':\n query = {\n 'match': {\n fields[0]: self.query_string,\n }\n }\n else:\n query = {\n 'match': {\n fields[0]: {\n 'query': self.query_string,\n 'operator': self.operator,\n }\n }\n }\n else:\n query = {\n 'multi_match': {\n 'query': self.query_string,\n 'fields': fields,\n }\n }\n\n if self.operator != 'or':\n query['multi_match']['operator'] = self.operator\n else:\n query = {\n 'match_all': {}\n }\n\n return query\n\n def get_query(self):\n return self.get_inner_query()\n\n\nclass DashboardSearchResults(DEFAULT_BACKEND_RESULTS_CLASS):\n\n def _do_search(self):\n # Params for elasticsearch query\n params = dict(\n body=self._get_es_body(),\n _source=False,\n from_=self.start,\n index='{}*'.format(self.backend.get_index().name)\n )\n params[self.fields_param_name] = 'pk'\n\n # Add size if set\n if self.stop is not None:\n params['size'] = self.stop - self.start\n # Send to Elasticsearch\n hits = self.backend.es.search(**params)\n search_hits = defaultdict(list)\n scores = {}\n for hit in hits['hits']['hits']:\n hit_type = hit['_type']\n hit_pk = hit['fields']['pk'][0]\n search_hits[hit_type].append(hit_pk)\n scores[hit['_id']] = hit['_score']\n\n # Group results by content type\n results_by_model = {}\n for content_type, hit_pks in search_hits.items():\n queryset = self.query.queryset_map[content_type]\n results_by_model[content_type] = queryset.filter(pk__in=hit_pks)\n\n # Merge results back in one list ordered by search score\n all_results = []\n for content_type, hits in results_by_model.items():\n for hit in 
hits:\n score_key = '%s:%d' % (content_type, hit.pk)\n setattr(hit, 'search_score', scores[score_key])\n setattr(hit, 'content_type', content_type)\n all_results.append(hit)\n sorted_results = sorted(\n all_results, key=lambda h: h.search_score, reverse=True)\n return list(sorted_results)\n\n def _get_es_body(self, for_count=False):\n body = {\n 'query': self.query.get_query()\n }\n\n if not for_count:\n sort = None\n\n if sort is not None:\n body['sort'] = sort\n\n return body\n\n def _do_count(self):\n # Get count\n hit_count = self.backend.es.count(\n body=self._get_es_body(for_count=True),\n index='{}*'.format(self.backend.get_index().name)\n )['count']\n # Add limits\n hit_count -= self.start\n if self.stop is not None:\n hit_count = min(hit_count, self.stop - self.start)\n\n return max(hit_count, 0)\n\n\nclass DashboardMultiTypeSearchBackend(DEFAULT_BACKEND_CLASS):\n results_class = DashboardSearchResults\n query_class = DashboardSearchQuery\n\n def search(self, query_string,\n model_or_queryset=None, fields=None, filters=None,\n prefetch_related=None, operator=None, order_by_relevance=True,\n queryset_map=None):\n \"\"\"\n Multi-model search. Parameters that affect model or database\n structure are skipped and not used in dashboard query implementation.\n \"\"\"\n search_query = self.query_class(\n query_string=query_string, fields=fields, operator=operator,\n order_by_relevance=order_by_relevance, queryset_map=queryset_map)\n return self.results_class(self, search_query)\n\nSearchBackend = DashboardMultiTypeSearchBackend\n", "path": "saleor/search/backends/dashboard.py"}], "after_files": [{"content": "from collections import defaultdict\n\nfrom . import get_search_backend\nfrom .base import BaseSearchQuery\nfrom ..index import get_indexed_models\n\nCONTENT_TYPES_MAP = {\n model.indexed_get_content_type(): model\n for model in get_indexed_models()}\n\nDEFAULT_BACKEND = get_search_backend('default')\nDEFAULT_BACKEND_CLASS = DEFAULT_BACKEND.__class__\nDEFAULT_BACKEND_RESULTS_CLASS = DEFAULT_BACKEND.results_class\n\n\nclass DashboardSearchQuery(BaseSearchQuery):\n \"\"\"\n Query that will search in multiple indexes\n \"\"\"\n\n def __init__(self, query_string,\n fields=None, operator=None, order_by_relevance=True,\n queryset_map=None):\n if queryset_map:\n queryset_map = {model.indexed_get_content_type(): queryset\n for model, queryset in queryset_map.items()}\n else:\n queryset_map = {content_type: model.objects.all()\n for content_type, model in CONTENT_TYPES_MAP.items()}\n self.queryset_map = queryset_map\n super(DashboardSearchQuery, self).__init__(\n query_string=query_string, queryset=None, fields=fields,\n operator=operator, order_by_relevance=order_by_relevance)\n\n def get_inner_query(self):\n if self.query_string is not None:\n fields = self.fields or ['_all', '_partials']\n\n if len(fields) == 1:\n if self.operator == 'or':\n query = {\n 'match': {\n fields[0]: self.query_string,\n }\n }\n else:\n query = {\n 'match': {\n fields[0]: {\n 'query': self.query_string,\n 'operator': self.operator,\n }\n }\n }\n else:\n query = {\n 'multi_match': {\n 'query': self.query_string,\n 'fields': fields,\n }\n }\n\n if self.operator != 'or':\n query['multi_match']['operator'] = self.operator\n else:\n query = {\n 'match_all': {}\n }\n\n return query\n\n def get_query(self):\n return self.get_inner_query()\n\n\nclass DashboardSearchResults(DEFAULT_BACKEND_RESULTS_CLASS):\n\n def _do_search(self):\n # Params for elasticsearch query\n params = dict(\n body=self._get_es_body(),\n 
_source=False,\n from_=self.start,\n index='{}*'.format(self.backend.get_index().name)\n )\n params[self.fields_param_name] = 'pk'\n\n # Add size if set\n if self.stop is not None:\n params['size'] = self.stop - self.start\n # Send to Elasticsearch\n hits = self.backend.es.search(**params)\n search_hits = defaultdict(list)\n scores = {}\n for hit in hits['hits']['hits']:\n hit_type = hit['_type']\n hit_pk = hit['_source']['pk']\n search_hits[hit_type].append(hit_pk)\n scores[hit['_id']] = hit['_score']\n\n # Group results by content type\n results_by_model = {}\n for content_type, hit_pks in search_hits.items():\n queryset = self.query.queryset_map[content_type]\n results_by_model[content_type] = queryset.filter(pk__in=hit_pks)\n\n # Merge results back in one list ordered by search score\n all_results = []\n for content_type, hits in results_by_model.items():\n for hit in hits:\n score_key = '%s:%d' % (content_type, hit.pk)\n setattr(hit, 'search_score', scores[score_key])\n setattr(hit, 'content_type', content_type)\n all_results.append(hit)\n sorted_results = sorted(\n all_results, key=lambda h: h.search_score, reverse=True)\n return list(sorted_results)\n\n def _get_es_body(self, for_count=False):\n body = {\n 'query': self.query.get_query()\n }\n\n if not for_count:\n sort = None\n\n if sort is not None:\n body['sort'] = sort\n\n return body\n\n def _do_count(self):\n # Get count\n hit_count = self.backend.es.count(\n body=self._get_es_body(for_count=True),\n index='{}*'.format(self.backend.get_index().name)\n )['count']\n # Add limits\n hit_count -= self.start\n if self.stop is not None:\n hit_count = min(hit_count, self.stop - self.start)\n\n return max(hit_count, 0)\n\n\nclass DashboardMultiTypeSearchBackend(DEFAULT_BACKEND_CLASS):\n results_class = DashboardSearchResults\n query_class = DashboardSearchQuery\n\n def search(self, query_string,\n model_or_queryset=None, fields=None, filters=None,\n prefetch_related=None, operator=None, order_by_relevance=True,\n queryset_map=None):\n \"\"\"\n Multi-model search. Parameters that affect model or database\n structure are skipped and not used in dashboard query implementation.\n \"\"\"\n search_query = self.query_class(\n query_string=query_string, fields=fields, operator=operator,\n order_by_relevance=order_by_relevance, queryset_map=queryset_map)\n return self.results_class(self, search_query)\n\nSearchBackend = DashboardMultiTypeSearchBackend\n", "path": "saleor/search/backends/dashboard.py"}]} | 1,811 | 125 |
gh_patches_debug_14985 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't validate SAM transformed resources for rule I3042
### CloudFormation Lint Version
v0.71.1
### What operating system are you using?
Mac
### Describe the bug
When SAM transforms templates it can create hardcoded ARNs based on its scenario. It would make sense to not validate those ARNs against rule I3042
### Expected behavior
To not raise I3042 on resources that are created by SAM transform.
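A minimal sketch of that skip, assuming the rule can see the template's `Transform` section (the real patch below reads it from `cfn.transform_pre`):
```python
def uses_sam_transform(template: dict) -> bool:
    """Return True when the template declares the SAM transform."""
    transforms = template.get("Transform", [])
    if not isinstance(transforms, list):
        transforms = [transforms]
    return "AWS::Serverless-2016-10-31" in transforms


# Rule I3042 would then return no matches for SAM templates.
assert uses_sam_transform({"Transform": "AWS::Serverless-2016-10-31"})
assert not uses_sam_transform({"Resources": {}})
```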
### Reproduction template
```yaml
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/HardCodedArnProperties.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6
7 from cfnlint.rules import CloudFormationLintRule, RuleMatch
8
9
10 class HardCodedArnProperties(CloudFormationLintRule):
11 """Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number"""
12
13 id = "I3042"
14 shortdesc = "ARNs should use correctly placed Pseudo Parameters"
15 description = "Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number"
16 source_url = ""
17 tags = ["resources"]
18 regex = re.compile(
19 r"arn:(\$\{[^:]*::[^:]*}|[^:]*):[^:]+:(\$\{[^:]*::[^:]*}|[^:]*):(\$\{[^:]*::[^:]*}|[^:]*)"
20 )
21
22 def __init__(self):
23 """Init"""
24 super().__init__()
25 self.config_definition = {
26 "partition": {
27 "default": True,
28 "type": "boolean",
29 },
30 "region": {
31 "default": False,
32 "type": "boolean",
33 },
34 "accountId": {
35 "default": False,
36 "type": "boolean",
37 },
38 }
39 self.configure()
40
41 def _match_values(self, cfnelem, path):
42 """Recursively search for values matching the searchRegex"""
43 values = []
44 if isinstance(cfnelem, dict):
45 for key in cfnelem:
46 pathprop = path[:]
47 pathprop.append(key)
48 values.extend(self._match_values(cfnelem[key], pathprop))
49 elif isinstance(cfnelem, list):
50 for index, item in enumerate(cfnelem):
51 pathprop = path[:]
52 pathprop.append(index)
53 values.extend(self._match_values(item, pathprop))
54 else:
55 # Leaf node
56 if isinstance(cfnelem, str): # and re.match(searchRegex, cfnelem):
57 for variable in re.findall(self.regex, cfnelem):
58 if "Fn::Sub" in path:
59 values.append(path + [variable])
60
61 return values
62
63 def match_values(self, cfn):
64 """
65 Search for values in all parts of the templates that match the searchRegex
66 """
67 results = []
68 results.extend(self._match_values(cfn.template.get("Resources", {}), []))
69 # Globals are removed during a transform. They need to be checked manually
70 results.extend(self._match_values(cfn.template.get("Globals", {}), []))
71 return results
72
73 def match(self, cfn):
74 """Check CloudFormation Resources"""
75 matches = []
76
77 # Get a list of paths to every leaf node string containing at least one ${parameter}
78 parameter_string_paths = self.match_values(cfn)
79 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
80 for parameter_string_path in parameter_string_paths:
81 path = ["Resources"] + parameter_string_path[:-1]
82 candidate = parameter_string_path[-1]
83
84 # !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
85 # is valid even with aws as the account #. This handles empty string
86 if self.config["partition"] and not re.match(
87 r"^\$\{\w+}|\$\{AWS::Partition}|$", candidate[0]
88 ):
89 # or not re.match(r'^(\$\{\w+}|\$\{AWS::Region}|)$', candidate[1]) or not re.match(r'^\$\{\w+}|\$\{AWS::AccountId}|aws|$', candidate[2]):
90 message = "ARN in Resource {0} contains hardcoded Partition in ARN or incorrectly placed Pseudo Parameters"
91 matches.append(RuleMatch(path, message.format(path[1])))
92 if self.config["region"] and not re.match(
93 r"^(\$\{\w+}|\$\{AWS::Region}|)$", candidate[1]
94 ):
95 # or or not re.match(r'^\$\{\w+}|\$\{AWS::AccountId}|aws|$', candidate[2]):
96 message = "ARN in Resource {0} contains hardcoded Region in ARN or incorrectly placed Pseudo Parameters"
97 matches.append(RuleMatch(path, message.format(path[1])))
98 if self.config["accountId"] and not re.match(
99 r"^\$\{\w+}|\$\{AWS::AccountId}|aws|$", candidate[2]
100 ):
101 message = "ARN in Resource {0} contains hardcoded AccountId in ARN or incorrectly placed Pseudo Parameters"
102 matches.append(RuleMatch(path, message.format(path[1])))
103
104 return matches
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/HardCodedArnProperties.py b/src/cfnlint/rules/resources/HardCodedArnProperties.py
--- a/src/cfnlint/rules/resources/HardCodedArnProperties.py
+++ b/src/cfnlint/rules/resources/HardCodedArnProperties.py
@@ -71,9 +71,13 @@
return results
def match(self, cfn):
- """Check CloudFormation Resources"""
matches = []
+ transforms = cfn.transform_pre["Transform"]
+ transforms = transforms if isinstance(transforms, list) else [transforms]
+ if "AWS::Serverless-2016-10-31" in cfn.transform_pre["Transform"]:
+ return matches
+
# Get a list of paths to every leaf node string containing at least one ${parameter}
parameter_string_paths = self.match_values(cfn)
# We want to search all of the paths to check if each one contains an 'Fn::Sub'
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/HardCodedArnProperties.py b/src/cfnlint/rules/resources/HardCodedArnProperties.py\n--- a/src/cfnlint/rules/resources/HardCodedArnProperties.py\n+++ b/src/cfnlint/rules/resources/HardCodedArnProperties.py\n@@ -71,9 +71,13 @@\n return results\r\n \r\n def match(self, cfn):\r\n- \"\"\"Check CloudFormation Resources\"\"\"\r\n matches = []\r\n \r\n+ transforms = cfn.transform_pre[\"Transform\"]\r\n+ transforms = transforms if isinstance(transforms, list) else [transforms]\r\n+ if \"AWS::Serverless-2016-10-31\" in cfn.transform_pre[\"Transform\"]:\r\n+ return matches\r\n+\r\n # Get a list of paths to every leaf node string containing at least one ${parameter}\r\n parameter_string_paths = self.match_values(cfn)\r\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n", "issue": "Don't validate SAM transformed resources for rule I3042\n### CloudFormation Lint Version\n\nv0.71.1\n\n### What operating system are you using?\n\nMac\n\n### Describe the bug\n\nWhen SAM transforms templates it can create hardcoded ARNs based on its scenario. It would make sense to not validate those ARNs against rule I3042\n\n### Expected behavior\n\nTo not raise I3042 on resources that are created by SAM transform.\n\n### Reproduction template\n\n```yaml\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\r\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\r\nSPDX-License-Identifier: MIT-0\r\n\"\"\"\r\nimport re\r\n\r\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\r\n\r\n\r\nclass HardCodedArnProperties(CloudFormationLintRule):\r\n \"\"\"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\"\"\r\n\r\n id = \"I3042\"\r\n shortdesc = \"ARNs should use correctly placed Pseudo Parameters\"\r\n description = \"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\r\n source_url = \"\"\r\n tags = [\"resources\"]\r\n regex = re.compile(\r\n r\"arn:(\\$\\{[^:]*::[^:]*}|[^:]*):[^:]+:(\\$\\{[^:]*::[^:]*}|[^:]*):(\\$\\{[^:]*::[^:]*}|[^:]*)\"\r\n )\r\n\r\n def __init__(self):\r\n \"\"\"Init\"\"\"\r\n super().__init__()\r\n self.config_definition = {\r\n \"partition\": {\r\n \"default\": True,\r\n \"type\": \"boolean\",\r\n },\r\n \"region\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n \"accountId\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n }\r\n self.configure()\r\n\r\n def _match_values(self, cfnelem, path):\r\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\r\n values = []\r\n if isinstance(cfnelem, dict):\r\n for key in cfnelem:\r\n pathprop = path[:]\r\n pathprop.append(key)\r\n values.extend(self._match_values(cfnelem[key], pathprop))\r\n elif isinstance(cfnelem, list):\r\n for index, item in enumerate(cfnelem):\r\n pathprop = path[:]\r\n pathprop.append(index)\r\n values.extend(self._match_values(item, pathprop))\r\n else:\r\n # Leaf node\r\n if isinstance(cfnelem, str): # and re.match(searchRegex, cfnelem):\r\n for variable in re.findall(self.regex, cfnelem):\r\n if \"Fn::Sub\" in path:\r\n values.append(path + [variable])\r\n\r\n return values\r\n\r\n def match_values(self, cfn):\r\n \"\"\"\r\n Search for values in all parts of the templates that match the searchRegex\r\n \"\"\"\r\n results = []\r\n results.extend(self._match_values(cfn.template.get(\"Resources\", {}), []))\r\n # Globals are removed 
during a transform. They need to be checked manually\r\n results.extend(self._match_values(cfn.template.get(\"Globals\", {}), []))\r\n return results\r\n\r\n def match(self, cfn):\r\n \"\"\"Check CloudFormation Resources\"\"\"\r\n matches = []\r\n\r\n # Get a list of paths to every leaf node string containing at least one ${parameter}\r\n parameter_string_paths = self.match_values(cfn)\r\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\r\n for parameter_string_path in parameter_string_paths:\r\n path = [\"Resources\"] + parameter_string_path[:-1]\r\n candidate = parameter_string_path[-1]\r\n\r\n # !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole\r\n # is valid even with aws as the account #. This handles empty string\r\n if self.config[\"partition\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::Partition}|$\", candidate[0]\r\n ):\r\n # or not re.match(r'^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$', candidate[1]) or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Partition in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"region\"] and not re.match(\r\n r\"^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$\", candidate[1]\r\n ):\r\n # or or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Region in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"accountId\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$\", candidate[2]\r\n ):\r\n message = \"ARN in Resource {0} contains hardcoded AccountId in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n\r\n return matches\r\n", "path": "src/cfnlint/rules/resources/HardCodedArnProperties.py"}], "after_files": [{"content": "\"\"\"\r\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\r\nSPDX-License-Identifier: MIT-0\r\n\"\"\"\r\nimport re\r\n\r\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\r\n\r\n\r\nclass HardCodedArnProperties(CloudFormationLintRule):\r\n \"\"\"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\"\"\r\n\r\n id = \"I3042\"\r\n shortdesc = \"ARNs should use correctly placed Pseudo Parameters\"\r\n description = \"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\r\n source_url = \"\"\r\n tags = [\"resources\"]\r\n regex = re.compile(\r\n r\"arn:(\\$\\{[^:]*::[^:]*}|[^:]*):[^:]+:(\\$\\{[^:]*::[^:]*}|[^:]*):(\\$\\{[^:]*::[^:]*}|[^:]*)\"\r\n )\r\n\r\n def __init__(self):\r\n \"\"\"Init\"\"\"\r\n super().__init__()\r\n self.config_definition = {\r\n \"partition\": {\r\n \"default\": True,\r\n \"type\": \"boolean\",\r\n },\r\n \"region\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n \"accountId\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n }\r\n self.configure()\r\n\r\n def _match_values(self, cfnelem, path):\r\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\r\n values = []\r\n if isinstance(cfnelem, dict):\r\n for key in cfnelem:\r\n pathprop = path[:]\r\n pathprop.append(key)\r\n values.extend(self._match_values(cfnelem[key], pathprop))\r\n elif isinstance(cfnelem, list):\r\n for index, item in enumerate(cfnelem):\r\n pathprop = path[:]\r\n pathprop.append(index)\r\n values.extend(self._match_values(item, pathprop))\r\n else:\r\n # Leaf node\r\n if isinstance(cfnelem, str): # and re.match(searchRegex, cfnelem):\r\n for variable in re.findall(self.regex, cfnelem):\r\n if \"Fn::Sub\" in path:\r\n values.append(path + [variable])\r\n\r\n return values\r\n\r\n def match_values(self, cfn):\r\n \"\"\"\r\n Search for values in all parts of the templates that match the searchRegex\r\n \"\"\"\r\n results = []\r\n results.extend(self._match_values(cfn.template.get(\"Resources\", {}), []))\r\n # Globals are removed during a transform. They need to be checked manually\r\n results.extend(self._match_values(cfn.template.get(\"Globals\", {}), []))\r\n return results\r\n\r\n def match(self, cfn):\r\n matches = []\r\n\r\n transforms = cfn.transform_pre[\"Transform\"]\r\n transforms = transforms if isinstance(transforms, list) else [transforms]\r\n if \"AWS::Serverless-2016-10-31\" in cfn.transform_pre[\"Transform\"]:\r\n return matches\r\n\r\n # Get a list of paths to every leaf node string containing at least one ${parameter}\r\n parameter_string_paths = self.match_values(cfn)\r\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\r\n for parameter_string_path in parameter_string_paths:\r\n path = [\"Resources\"] + parameter_string_path[:-1]\r\n candidate = parameter_string_path[-1]\r\n\r\n # !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole\r\n # is valid even with aws as the account #. 
This handles empty string\r\n if self.config[\"partition\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::Partition}|$\", candidate[0]\r\n ):\r\n # or not re.match(r'^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$', candidate[1]) or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Partition in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"region\"] and not re.match(\r\n r\"^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$\", candidate[1]\r\n ):\r\n # or or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Region in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"accountId\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$\", candidate[2]\r\n ):\r\n message = \"ARN in Resource {0} contains hardcoded AccountId in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n\r\n return matches\r\n", "path": "src/cfnlint/rules/resources/HardCodedArnProperties.py"}]} | 1,616 | 218 |
gh_patches_debug_32901 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-4742 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log deletions of galleries and publications
Just log the fact that it was a deletion, the type of object concerned and the slug, so that the corresponding nginx log entries can easily be traced back by date and time if a problem arises.
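A minimal sketch of such a receiver (logger name, message wording and the generic hookup are assumptions; the actual patch below registers it for the gallery and publication models):
```python
import logging

from django.db.models.signals import post_delete
from django.dispatch import receiver

logger = logging.getLogger(__name__)


@receiver(post_delete)  # in practice restricted to Gallery and PublishableContent
def log_deletion(sender, instance, **kwargs):
    # Only the deletion itself, the object type and the slug are logged, so the
    # entry can be matched against the nginx access logs by date and time.
    logger.info("deleted %s %s", type(instance).__name__,
                getattr(instance, "slug", instance.pk))
```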
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/tutorialv2/receivers.py`
Content:
```
1 # coding: utf-8
2
3
4 import datetime
5 from django.dispatch.dispatcher import receiver
6 from django.utils.translation import ugettext_lazy as _
7 from zds.tutorialv2.models.models_database import PublishableContent
8 from zds.tutorialv2.signals import content_unpublished
9 from zds.utils import get_current_user
10 from zds.utils.models import Alert
11
12
13 @receiver(content_unpublished, sender=PublishableContent)
14 def cleanup_validation_alerts(sender, instance, **kwargs):
15 """
16 When opinions are unpublished (probably permanently), we must be sure all alerts are handled. For now we just \
17 resolve them.
18
19 :param sender: sender class
20 :param instance: object instance
21 :param kwargs: possibily moderator
22 """
23 if instance.is_opinion:
24 moderator = kwargs.get('moderator', get_current_user())
25 Alert.objects.filter(scope='CONTENT', content=instance).update(moderator=moderator,
26 resolve_reason=_('Le billet a été dépublié.'),
27 solved_date=datetime.datetime.now(),
28 solved=True)
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/tutorialv2/receivers.py b/zds/tutorialv2/receivers.py
--- a/zds/tutorialv2/receivers.py
+++ b/zds/tutorialv2/receivers.py
@@ -2,10 +2,15 @@
import datetime
+import logging
+
from django.dispatch.dispatcher import receiver
from django.utils.translation import ugettext_lazy as _
+from django.db import models
+
from zds.tutorialv2.models.models_database import PublishableContent
from zds.tutorialv2.signals import content_unpublished
+from zds.gallery.models import Gallery
from zds.utils import get_current_user
from zds.utils.models import Alert
@@ -26,3 +31,25 @@
resolve_reason=_('Le billet a été dépublié.'),
solved_date=datetime.datetime.now(),
solved=True)
+
+
+@receiver(models.signals.post_delete, sender=Gallery)
+@receiver(models.signals.post_delete, sender=PublishableContent)
+def log_content_deletion(sender, instance, **kwargs):
+ """
+ When a content or gallery is deleted, this action is logged.
+ """
+
+ logger = logging.getLogger(__name__)
+ current_user = get_current_user()
+
+ if current_user is None:
+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted. User not found.',
+ {'instance_model': type(instance).__name__, 'instance_pk': instance.pk,
+ 'instance_slug': instance.slug})
+ else:
+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted '
+ 'by user #%(user_pk)d (%(username)s).', {'instance_model': type(instance).__name__,
+ 'instance_pk': instance.pk, 'instance_slug': instance.slug,
+ 'user_pk': current_user.pk,
+ 'username': current_user.username})
| {"golden_diff": "diff --git a/zds/tutorialv2/receivers.py b/zds/tutorialv2/receivers.py\n--- a/zds/tutorialv2/receivers.py\n+++ b/zds/tutorialv2/receivers.py\n@@ -2,10 +2,15 @@\n \n \n import datetime\n+import logging\n+\n from django.dispatch.dispatcher import receiver\n from django.utils.translation import ugettext_lazy as _\n+from django.db import models\n+\n from zds.tutorialv2.models.models_database import PublishableContent\n from zds.tutorialv2.signals import content_unpublished\n+from zds.gallery.models import Gallery\n from zds.utils import get_current_user\n from zds.utils.models import Alert\n \n@@ -26,3 +31,25 @@\n resolve_reason=_('Le billet a \u00e9t\u00e9 d\u00e9publi\u00e9.'),\n solved_date=datetime.datetime.now(),\n solved=True)\n+\n+\n+@receiver(models.signals.post_delete, sender=Gallery)\n+@receiver(models.signals.post_delete, sender=PublishableContent)\n+def log_content_deletion(sender, instance, **kwargs):\n+ \"\"\"\n+ When a content or gallery is deleted, this action is logged.\n+ \"\"\"\n+\n+ logger = logging.getLogger(__name__)\n+ current_user = get_current_user()\n+\n+ if current_user is None:\n+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted. User not found.',\n+ {'instance_model': type(instance).__name__, 'instance_pk': instance.pk,\n+ 'instance_slug': instance.slug})\n+ else:\n+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted '\n+ 'by user #%(user_pk)d (%(username)s).', {'instance_model': type(instance).__name__,\n+ 'instance_pk': instance.pk, 'instance_slug': instance.slug,\n+ 'user_pk': current_user.pk,\n+ 'username': current_user.username})\n", "issue": "Logger les suppressions de galleries et de publications\nLogger juste le fait que ce soit une suppression, le type d\u2019objet concern\u00e9 et le slug histoire qu\u2019on puisse facilement remonter aux logs de nginx correspondantes avec la date et l\u2019heures en cas de probl\u00e8me.\n", "before_files": [{"content": "# coding: utf-8\n\n\nimport datetime\nfrom django.dispatch.dispatcher import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom zds.tutorialv2.models.models_database import PublishableContent\nfrom zds.tutorialv2.signals import content_unpublished\nfrom zds.utils import get_current_user\nfrom zds.utils.models import Alert\n\n\n@receiver(content_unpublished, sender=PublishableContent)\ndef cleanup_validation_alerts(sender, instance, **kwargs):\n \"\"\"\n When opinions are unpublished (probably permanently), we must be sure all alerts are handled. 
For now we just \\\n resolve them.\n\n :param sender: sender class\n :param instance: object instance\n :param kwargs: possibily moderator\n \"\"\"\n if instance.is_opinion:\n moderator = kwargs.get('moderator', get_current_user())\n Alert.objects.filter(scope='CONTENT', content=instance).update(moderator=moderator,\n resolve_reason=_('Le billet a \u00e9t\u00e9 d\u00e9publi\u00e9.'),\n solved_date=datetime.datetime.now(),\n solved=True)\n", "path": "zds/tutorialv2/receivers.py"}], "after_files": [{"content": "# coding: utf-8\n\n\nimport datetime\nimport logging\n\nfrom django.dispatch.dispatcher import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db import models\n\nfrom zds.tutorialv2.models.models_database import PublishableContent\nfrom zds.tutorialv2.signals import content_unpublished\nfrom zds.gallery.models import Gallery\nfrom zds.utils import get_current_user\nfrom zds.utils.models import Alert\n\n\n@receiver(content_unpublished, sender=PublishableContent)\ndef cleanup_validation_alerts(sender, instance, **kwargs):\n \"\"\"\n When opinions are unpublished (probably permanently), we must be sure all alerts are handled. For now we just \\\n resolve them.\n\n :param sender: sender class\n :param instance: object instance\n :param kwargs: possibily moderator\n \"\"\"\n if instance.is_opinion:\n moderator = kwargs.get('moderator', get_current_user())\n Alert.objects.filter(scope='CONTENT', content=instance).update(moderator=moderator,\n resolve_reason=_('Le billet a \u00e9t\u00e9 d\u00e9publi\u00e9.'),\n solved_date=datetime.datetime.now(),\n solved=True)\n\n\n@receiver(models.signals.post_delete, sender=Gallery)\n@receiver(models.signals.post_delete, sender=PublishableContent)\ndef log_content_deletion(sender, instance, **kwargs):\n \"\"\"\n When a content or gallery is deleted, this action is logged.\n \"\"\"\n\n logger = logging.getLogger(__name__)\n current_user = get_current_user()\n\n if current_user is None:\n logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted. User not found.',\n {'instance_model': type(instance).__name__, 'instance_pk': instance.pk,\n 'instance_slug': instance.slug})\n else:\n logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted '\n 'by user #%(user_pk)d (%(username)s).', {'instance_model': type(instance).__name__,\n 'instance_pk': instance.pk, 'instance_slug': instance.slug,\n 'user_pk': current_user.pk,\n 'username': current_user.username})\n", "path": "zds/tutorialv2/receivers.py"}]} | 594 | 419 |
gh_patches_debug_24996 | rasdani/github-patches | git_diff | bentoml__BentoML-3941 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: BentoML Sklearn Example won't work on >= bentoml==1.0.20
### Describe the bug
Following the steps from the [example](https://github.com/bentoml/BentoML/tree/main/examples/sklearn/pipeline), running `bentoml serve service.py:svc` produces:
2023-06-08T08:24:26+0000 [ERROR] [runner:20_news_group:1] Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/starlette/routing.py", line 671, in lifespan
async with self.lifespan_context(app):
File "/usr/local/lib/python3.10/dist-packages/starlette/routing.py", line 566, in __aenter__
await self._router.startup()
File "/usr/local/lib/python3.10/dist-packages/starlette/routing.py", line 650, in startup
handler()
File "/usr/local/lib/python3.10/dist-packages/bentoml/_internal/server/runner_app.py", line 74, in _init_metrics_wrappers
self.legacy_adaptive_batch_size_hist_map = {
File "/usr/local/lib/python3.10/dist-packages/bentoml/_internal/server/runner_app.py", line 75, in <dictcomp>
method.name: metrics_client.Histogram(
File "/usr/local/lib/python3.10/dist-packages/prometheus_client/metrics.py", line 558, in __init__
super().__init__(
File "/usr/local/lib/python3.10/dist-packages/prometheus_client/metrics.py", line 130, in __init__
raise ValueError('Invalid metric name: ' + self._name)
ValueError: Invalid metric name: 20_news_group_1_predict_adaptive_batch_size
The ValueError did not appear with pip3 install bentoml==1.0.19.
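The failure comes from Prometheus metric-name validation: names must match `[a-zA-Z_:][a-zA-Z0-9_:]*`, so a name starting with a digit (here derived from the model tag `20_news_group`) is rejected. A small illustration:
```python
import re

# Metric-name pattern from the Prometheus data model, which prometheus_client enforces.
METRIC_NAME_RE = re.compile(r"^[a-zA-Z_:][a-zA-Z0-9_:]*$")

assert METRIC_NAME_RE.match("news_group_20_1_predict_adaptive_batch_size")
assert not METRIC_NAME_RE.match("20_news_group_1_predict_adaptive_batch_size")  # leading digit
```
Saving the model under a tag that does not begin with a digit (for example news_group_20) would presumably sidestep the error.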
### To reproduce
pip install bentoml==1.0.21
follow instructions from https://github.com/bentoml/BentoML/tree/main/examples/sklearn/pipeline
### Expected behavior
_No response_
### Environment
bentoml==1.0.20 or bentoml==1.0.21
Python 3.10.7
Ubuntu 22.10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/sklearn/pipeline/service.py`
Content:
```
1 import bentoml
2 from bentoml.io import JSON
3 from bentoml.io import Text
4
5 bento_model = bentoml.sklearn.get("20_news_group:latest")
6
7 target_names = bento_model.custom_objects["target_names"]
8 model_runner = bento_model.to_runner()
9
10 svc = bentoml.Service("doc_classifier", runners=[model_runner])
11
12
13 @svc.api(input=Text(), output=JSON())
14 async def predict(input_doc: str):
15 predictions = await model_runner.predict.async_run([input_doc])
16 return {"result": target_names[predictions[0]]}
17
18
19 @svc.api(input=Text(), output=JSON())
20 async def predict_proba(input_doc: str):
21 predictions = await model_runner.predict_proba.async_run([input_doc])
22 return predictions[0]
23
```
Path: `examples/sklearn/pipeline/train.py`
Content:
```
1 import logging
2 from time import time
3 from pprint import pprint
4
5 from sklearn.datasets import fetch_20newsgroups
6 from sklearn.pipeline import Pipeline
7 from sklearn.linear_model import SGDClassifier
8 from sklearn.model_selection import GridSearchCV
9 from sklearn.feature_extraction.text import CountVectorizer
10 from sklearn.feature_extraction.text import TfidfTransformer
11
12 import bentoml
13
14 # Display progress logs on stdout
15 logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
16
17 # Load some categories from the training set
18 categories = [
19 "alt.atheism",
20 "talk.religion.misc",
21 ]
22
23 # Uncomment the following to do the analysis on all the categories
24 # categories = None
25
26 print("Loading 20 newsgroups dataset for categories:")
27 print(categories)
28
29 data = fetch_20newsgroups(subset="train", categories=categories)
30 print("%d documents" % len(data.filenames))
31 print("%d categories" % len(data.target_names))
32 print()
33
34 # Define a pipeline combining a text feature extractor with a simple classifier
35 pipeline = Pipeline(
36 [
37 ("vect", CountVectorizer()),
38 ("tfidf", TfidfTransformer()),
39 ("clf", SGDClassifier(loss="log_loss")),
40 ]
41 )
42
43 # Parameters to use for grid search. Uncommenting more parameters will give
44 # better exploring power but will increase processing time in a combinatorial
45 # way
46 parameters = {
47 "vect__max_df": (0.5, 0.75, 1.0),
48 # 'vect__max_features': (None, 5000, 10000, 50000),
49 "vect__ngram_range": ((1, 1), (1, 2)), # unigrams or bigrams
50 # 'tfidf__use_idf': (True, False),
51 # 'tfidf__norm': ('l1', 'l2'),
52 "clf__max_iter": (20,),
53 "clf__alpha": (0.00001, 0.000001),
54 "clf__penalty": ("l2", "elasticnet"),
55 # 'clf__max_iter': (10, 50, 80),
56 }
57
58 # Find the best parameters for both the feature extraction and the
59 # classifier
60 grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
61
62 print("Performing grid search...")
63 print("pipeline:", [name for name, _ in pipeline.steps])
64 print("parameters:")
65 pprint(parameters)
66 t0 = time()
67 grid_search.fit(data.data, data.target)
68 print("done in %0.3fs" % (time() - t0))
69 print()
70
71 print("Best score: %0.3f" % grid_search.best_score_)
72 best_parameters = grid_search.best_estimator_.get_params()
73 best_parameters = {
74 param_name: best_parameters[param_name] for param_name in sorted(parameters.keys())
75 }
76 print(f"Best parameters set: {best_parameters}")
77
78 bento_model = bentoml.sklearn.save_model(
79 "20_news_group",
80 grid_search.best_estimator_,
81 signatures={
82 "predict": {"batchable": True, "batch_dim": 0},
83 "predict_proba": {"batchable": True, "batch_dim": 0},
84 },
85 custom_objects={
86 "target_names": data.target_names,
87 },
88 metadata=best_parameters,
89 )
90 print(f"Model saved: {bento_model}")
91
92 # Test running inference with BentoML runner
93 test_runner = bentoml.sklearn.get("20_news_group:latest").to_runner()
94 test_runner.init_local()
95 assert test_runner.predict.run(["hello"]) == grid_search.best_estimator_.predict(
96 ["hello"]
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/sklearn/pipeline/service.py b/examples/sklearn/pipeline/service.py
--- a/examples/sklearn/pipeline/service.py
+++ b/examples/sklearn/pipeline/service.py
@@ -2,7 +2,7 @@
from bentoml.io import JSON
from bentoml.io import Text
-bento_model = bentoml.sklearn.get("20_news_group:latest")
+bento_model = bentoml.sklearn.get("twenty_news_group:latest")
target_names = bento_model.custom_objects["target_names"]
model_runner = bento_model.to_runner()
diff --git a/examples/sklearn/pipeline/train.py b/examples/sklearn/pipeline/train.py
--- a/examples/sklearn/pipeline/train.py
+++ b/examples/sklearn/pipeline/train.py
@@ -76,7 +76,7 @@
print(f"Best parameters set: {best_parameters}")
bento_model = bentoml.sklearn.save_model(
- "20_news_group",
+ "twenty_news_group",
grid_search.best_estimator_,
signatures={
"predict": {"batchable": True, "batch_dim": 0},
@@ -90,7 +90,7 @@
print(f"Model saved: {bento_model}")
# Test running inference with BentoML runner
-test_runner = bentoml.sklearn.get("20_news_group:latest").to_runner()
+test_runner = bentoml.sklearn.get("twenty_news_group:latest").to_runner()
test_runner.init_local()
assert test_runner.predict.run(["hello"]) == grid_search.best_estimator_.predict(
["hello"]
| {"golden_diff": "diff --git a/examples/sklearn/pipeline/service.py b/examples/sklearn/pipeline/service.py\n--- a/examples/sklearn/pipeline/service.py\n+++ b/examples/sklearn/pipeline/service.py\n@@ -2,7 +2,7 @@\n from bentoml.io import JSON\n from bentoml.io import Text\n \n-bento_model = bentoml.sklearn.get(\"20_news_group:latest\")\n+bento_model = bentoml.sklearn.get(\"twenty_news_group:latest\")\n \n target_names = bento_model.custom_objects[\"target_names\"]\n model_runner = bento_model.to_runner()\ndiff --git a/examples/sklearn/pipeline/train.py b/examples/sklearn/pipeline/train.py\n--- a/examples/sklearn/pipeline/train.py\n+++ b/examples/sklearn/pipeline/train.py\n@@ -76,7 +76,7 @@\n print(f\"Best parameters set: {best_parameters}\")\n \n bento_model = bentoml.sklearn.save_model(\n- \"20_news_group\",\n+ \"twenty_news_group\",\n grid_search.best_estimator_,\n signatures={\n \"predict\": {\"batchable\": True, \"batch_dim\": 0},\n@@ -90,7 +90,7 @@\n print(f\"Model saved: {bento_model}\")\n \n # Test running inference with BentoML runner\n-test_runner = bentoml.sklearn.get(\"20_news_group:latest\").to_runner()\n+test_runner = bentoml.sklearn.get(\"twenty_news_group:latest\").to_runner()\n test_runner.init_local()\n assert test_runner.predict.run([\"hello\"]) == grid_search.best_estimator_.predict(\n [\"hello\"]\n", "issue": "bug: BentoML Sklearn Example won't work on >= bentoml==1.0.20\n### Describe the bug\r\n\r\nFollowing steps from [example](https://github.com/bentoml/BentoML/tree/main/examples/sklearn/pipeline)\r\n \r\nbentoml serve service.py:svc will produce \r\n\r\n2023-06-08T08:24:26+0000 [ERROR] [runner:20_news_group:1] Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.10/dist-packages/starlette/routing.py\", line 671, in lifespan\r\n async with self.lifespan_context(app):\r\n File \"/usr/local/lib/python3.10/dist-packages/starlette/routing.py\", line 566, in __aenter__\r\n await self._router.startup()\r\n File \"/usr/local/lib/python3.10/dist-packages/starlette/routing.py\", line 650, in startup\r\n handler()\r\n File \"/usr/local/lib/python3.10/dist-packages/bentoml/_internal/server/runner_app.py\", line 74, in _init_metrics_wrappers\r\n self.legacy_adaptive_batch_size_hist_map = {\r\n File \"/usr/local/lib/python3.10/dist-packages/bentoml/_internal/server/runner_app.py\", line 75, in <dictcomp>\r\n method.name: metrics_client.Histogram(\r\n File \"/usr/local/lib/python3.10/dist-packages/prometheus_client/metrics.py\", line 558, in __init__\r\n super().__init__(\r\n File \"/usr/local/lib/python3.10/dist-packages/prometheus_client/metrics.py\", line 130, in __init__\r\n raise ValueError('Invalid metric name: ' + self._name)\r\nValueError: Invalid metric name: 20_news_group_1_predict_adaptive_batch_size\r\n\r\nValueError not appeared on pip3 install bentoml==1.0.19\r\n\r\n### To reproduce\r\n\r\npip install bentoml==1.0.21\r\nfollow instructions from https://github.com/bentoml/BentoML/tree/main/examples/sklearn/pipeline\r\n\r\n### Expected behavior\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\nbentoml==1.0.20 or bentoml==1.0.21\r\nPython 3.10.7\r\nUbuntu 22.10\n", "before_files": [{"content": "import bentoml\nfrom bentoml.io import JSON\nfrom bentoml.io import Text\n\nbento_model = bentoml.sklearn.get(\"20_news_group:latest\")\n\ntarget_names = bento_model.custom_objects[\"target_names\"]\nmodel_runner = bento_model.to_runner()\n\nsvc = bentoml.Service(\"doc_classifier\", runners=[model_runner])\n\n\[email protected](input=Text(), output=JSON())\nasync 
def predict(input_doc: str):\n predictions = await model_runner.predict.async_run([input_doc])\n return {\"result\": target_names[predictions[0]]}\n\n\[email protected](input=Text(), output=JSON())\nasync def predict_proba(input_doc: str):\n predictions = await model_runner.predict_proba.async_run([input_doc])\n return predictions[0]\n", "path": "examples/sklearn/pipeline/service.py"}, {"content": "import logging\nfrom time import time\nfrom pprint import pprint\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\nimport bentoml\n\n# Display progress logs on stdout\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s %(levelname)s %(message)s\")\n\n# Load some categories from the training set\ncategories = [\n \"alt.atheism\",\n \"talk.religion.misc\",\n]\n\n# Uncomment the following to do the analysis on all the categories\n# categories = None\n\nprint(\"Loading 20 newsgroups dataset for categories:\")\nprint(categories)\n\ndata = fetch_20newsgroups(subset=\"train\", categories=categories)\nprint(\"%d documents\" % len(data.filenames))\nprint(\"%d categories\" % len(data.target_names))\nprint()\n\n# Define a pipeline combining a text feature extractor with a simple classifier\npipeline = Pipeline(\n [\n (\"vect\", CountVectorizer()),\n (\"tfidf\", TfidfTransformer()),\n (\"clf\", SGDClassifier(loss=\"log_loss\")),\n ]\n)\n\n# Parameters to use for grid search. Uncommenting more parameters will give\n# better exploring power but will increase processing time in a combinatorial\n# way\nparameters = {\n \"vect__max_df\": (0.5, 0.75, 1.0),\n # 'vect__max_features': (None, 5000, 10000, 50000),\n \"vect__ngram_range\": ((1, 1), (1, 2)), # unigrams or bigrams\n # 'tfidf__use_idf': (True, False),\n # 'tfidf__norm': ('l1', 'l2'),\n \"clf__max_iter\": (20,),\n \"clf__alpha\": (0.00001, 0.000001),\n \"clf__penalty\": (\"l2\", \"elasticnet\"),\n # 'clf__max_iter': (10, 50, 80),\n}\n\n# Find the best parameters for both the feature extraction and the\n# classifier\ngrid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)\n\nprint(\"Performing grid search...\")\nprint(\"pipeline:\", [name for name, _ in pipeline.steps])\nprint(\"parameters:\")\npprint(parameters)\nt0 = time()\ngrid_search.fit(data.data, data.target)\nprint(\"done in %0.3fs\" % (time() - t0))\nprint()\n\nprint(\"Best score: %0.3f\" % grid_search.best_score_)\nbest_parameters = grid_search.best_estimator_.get_params()\nbest_parameters = {\n param_name: best_parameters[param_name] for param_name in sorted(parameters.keys())\n}\nprint(f\"Best parameters set: {best_parameters}\")\n\nbento_model = bentoml.sklearn.save_model(\n \"20_news_group\",\n grid_search.best_estimator_,\n signatures={\n \"predict\": {\"batchable\": True, \"batch_dim\": 0},\n \"predict_proba\": {\"batchable\": True, \"batch_dim\": 0},\n },\n custom_objects={\n \"target_names\": data.target_names,\n },\n metadata=best_parameters,\n)\nprint(f\"Model saved: {bento_model}\")\n\n# Test running inference with BentoML runner\ntest_runner = bentoml.sklearn.get(\"20_news_group:latest\").to_runner()\ntest_runner.init_local()\nassert test_runner.predict.run([\"hello\"]) == grid_search.best_estimator_.predict(\n [\"hello\"]\n)\n", "path": "examples/sklearn/pipeline/train.py"}], "after_files": 
[{"content": "import bentoml\nfrom bentoml.io import JSON\nfrom bentoml.io import Text\n\nbento_model = bentoml.sklearn.get(\"twenty_news_group:latest\")\n\ntarget_names = bento_model.custom_objects[\"target_names\"]\nmodel_runner = bento_model.to_runner()\n\nsvc = bentoml.Service(\"doc_classifier\", runners=[model_runner])\n\n\[email protected](input=Text(), output=JSON())\nasync def predict(input_doc: str):\n predictions = await model_runner.predict.async_run([input_doc])\n return {\"result\": target_names[predictions[0]]}\n\n\[email protected](input=Text(), output=JSON())\nasync def predict_proba(input_doc: str):\n predictions = await model_runner.predict_proba.async_run([input_doc])\n return predictions[0]\n", "path": "examples/sklearn/pipeline/service.py"}, {"content": "import logging\nfrom time import time\nfrom pprint import pprint\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\nimport bentoml\n\n# Display progress logs on stdout\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s %(levelname)s %(message)s\")\n\n# Load some categories from the training set\ncategories = [\n \"alt.atheism\",\n \"talk.religion.misc\",\n]\n\n# Uncomment the following to do the analysis on all the categories\n# categories = None\n\nprint(\"Loading 20 newsgroups dataset for categories:\")\nprint(categories)\n\ndata = fetch_20newsgroups(subset=\"train\", categories=categories)\nprint(\"%d documents\" % len(data.filenames))\nprint(\"%d categories\" % len(data.target_names))\nprint()\n\n# Define a pipeline combining a text feature extractor with a simple classifier\npipeline = Pipeline(\n [\n (\"vect\", CountVectorizer()),\n (\"tfidf\", TfidfTransformer()),\n (\"clf\", SGDClassifier(loss=\"log_loss\")),\n ]\n)\n\n# Parameters to use for grid search. 
Uncommenting more parameters will give\n# better exploring power but will increase processing time in a combinatorial\n# way\nparameters = {\n \"vect__max_df\": (0.5, 0.75, 1.0),\n # 'vect__max_features': (None, 5000, 10000, 50000),\n \"vect__ngram_range\": ((1, 1), (1, 2)), # unigrams or bigrams\n # 'tfidf__use_idf': (True, False),\n # 'tfidf__norm': ('l1', 'l2'),\n \"clf__max_iter\": (20,),\n \"clf__alpha\": (0.00001, 0.000001),\n \"clf__penalty\": (\"l2\", \"elasticnet\"),\n # 'clf__max_iter': (10, 50, 80),\n}\n\n# Find the best parameters for both the feature extraction and the\n# classifier\ngrid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)\n\nprint(\"Performing grid search...\")\nprint(\"pipeline:\", [name for name, _ in pipeline.steps])\nprint(\"parameters:\")\npprint(parameters)\nt0 = time()\ngrid_search.fit(data.data, data.target)\nprint(\"done in %0.3fs\" % (time() - t0))\nprint()\n\nprint(\"Best score: %0.3f\" % grid_search.best_score_)\nbest_parameters = grid_search.best_estimator_.get_params()\nbest_parameters = {\n param_name: best_parameters[param_name] for param_name in sorted(parameters.keys())\n}\nprint(f\"Best parameters set: {best_parameters}\")\n\nbento_model = bentoml.sklearn.save_model(\n \"twenty_news_group\",\n grid_search.best_estimator_,\n signatures={\n \"predict\": {\"batchable\": True, \"batch_dim\": 0},\n \"predict_proba\": {\"batchable\": True, \"batch_dim\": 0},\n },\n custom_objects={\n \"target_names\": data.target_names,\n },\n metadata=best_parameters,\n)\nprint(f\"Model saved: {bento_model}\")\n\n# Test running inference with BentoML runner\ntest_runner = bentoml.sklearn.get(\"twenty_news_group:latest\").to_runner()\ntest_runner.init_local()\nassert test_runner.predict.run([\"hello\"]) == grid_search.best_estimator_.predict(\n [\"hello\"]\n)\n", "path": "examples/sklearn/pipeline/train.py"}]} | 1,994 | 347 |
gh_patches_debug_32792 | rasdani/github-patches | git_diff | openvinotoolkit__datumaro-394 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dataset patches do not remove empty (e.g. renamed) subsets
Steps to reproduce:
1. Create a project
2. Import a dataset
3. Rename a subset (e.g. `datum transform -t random_split`)
Depending on the format, the exported dataset will contain annotations from renamed-from and renamed-to subsets. This leads to duplication of annotations in different subsets, which are then found and merged together on importing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datumaro/components/converter.py`
Content:
```
1 # Copyright (C) 2019-2021 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 from typing import Union
6 import logging as log
7 import os
8 import os.path as osp
9 import shutil
10
11 from datumaro.components.cli_plugin import CliPlugin
12 from datumaro.components.dataset import DatasetPatch
13 from datumaro.components.extractor import DatasetItem
14 from datumaro.util.image import Image
15
16
17 class Converter(CliPlugin):
18 DEFAULT_IMAGE_EXT = None
19
20 @classmethod
21 def build_cmdline_parser(cls, **kwargs):
22 parser = super().build_cmdline_parser(**kwargs)
23 parser.add_argument('--save-images', action='store_true',
24 help="Save images (default: %(default)s)")
25 parser.add_argument('--image-ext', default=None,
26 help="Image extension (default: keep or use format default%s)" % \
27 (' ' + cls.DEFAULT_IMAGE_EXT if cls.DEFAULT_IMAGE_EXT else ''))
28
29 return parser
30
31 @classmethod
32 def convert(cls, extractor, save_dir, **options):
33 converter = cls(extractor, save_dir, **options)
34 return converter.apply()
35
36 @classmethod
37 def patch(cls, dataset, patch, save_dir, **options):
38 return cls.convert(dataset, save_dir, **options)
39
40 def apply(self):
41 raise NotImplementedError("Should be implemented in a subclass")
42
43 def __init__(self, extractor, save_dir, save_images=False,
44 image_ext=None, default_image_ext=None):
45 default_image_ext = default_image_ext or self.DEFAULT_IMAGE_EXT
46 assert default_image_ext
47 self._default_image_ext = default_image_ext
48
49 self._save_images = save_images
50 self._image_ext = image_ext
51
52 self._extractor = extractor
53 self._save_dir = save_dir
54
55 # TODO: refactor this variable.
56 # Can be used by a subclass to store the current patch info
57 if isinstance(extractor, DatasetPatch.DatasetPatchWrapper):
58 self._patch = extractor.patch
59 else:
60 self._patch = None
61
62 def _find_image_ext(self, item: Union[DatasetItem, Image]):
63 src_ext = None
64
65 if isinstance(item, DatasetItem) and item.has_image:
66 src_ext = item.image.ext
67 elif isinstance(item, Image):
68 src_ext = item.ext
69
70 return self._image_ext or src_ext or self._default_image_ext
71
72 def _make_item_filename(self, item, *, name=None, subdir=None):
73 name = name or item.id
74 subdir = subdir or ''
75 return osp.join(subdir, name)
76
77 def _make_image_filename(self, item, *, name=None, subdir=None):
78 return self._make_item_filename(item, name=name, subdir=subdir) + \
79 self._find_image_ext(item)
80
81 def _make_pcd_filename(self, item, *, name=None, subdir=None):
82 return self._make_item_filename(item, name=name, subdir=subdir) + '.pcd'
83
84 def _save_image(self, item, path=None, *,
85 name=None, subdir=None, basedir=None):
86 assert not ((subdir or name or basedir) and path), \
87 "Can't use both subdir or name or basedir and path arguments"
88
89 if not item.has_image or not item.image.has_data:
90 log.warning("Item '%s' has no image", item.id)
91 return
92
93 basedir = basedir or self._save_dir
94 path = path or osp.join(basedir,
95 self._make_image_filename(item, name=name, subdir=subdir))
96 path = osp.abspath(path)
97
98 item.image.save(path)
99
100 def _save_point_cloud(self, item=None, path=None, *,
101 name=None, subdir=None, basedir=None):
102 assert not ((subdir or name or basedir) and path), \
103 "Can't use both subdir or name or basedir and path arguments"
104
105 if not item.point_cloud:
106 log.warning("Item '%s' has no pcd", item.id)
107 return
108
109 basedir = basedir or self._save_dir
110 path = path or osp.join(basedir,
111 self._make_pcd_filename(item, name=name, subdir=subdir))
112 path = osp.abspath(path)
113
114 os.makedirs(osp.dirname(path), exist_ok=True)
115 if item.point_cloud and osp.isfile(item.point_cloud):
116 if item.point_cloud != path:
117 shutil.copyfile(item.point_cloud, path)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datumaro/components/converter.py b/datumaro/components/converter.py
--- a/datumaro/components/converter.py
+++ b/datumaro/components/converter.py
@@ -2,6 +2,7 @@
#
# SPDX-License-Identifier: MIT
+from tempfile import mkdtemp
from typing import Union
import logging as log
import os
@@ -11,6 +12,7 @@
from datumaro.components.cli_plugin import CliPlugin
from datumaro.components.dataset import DatasetPatch
from datumaro.components.extractor import DatasetItem
+from datumaro.util import error_rollback, on_error_do
from datumaro.util.image import Image
@@ -34,8 +36,34 @@
return converter.apply()
@classmethod
+ @error_rollback
def patch(cls, dataset, patch, save_dir, **options):
- return cls.convert(dataset, save_dir, **options)
+ # This solution is not any better in performance than just
+ # writing a dataset, but in case of patching (i.e. writing
+ # to the previous location), it allows to avoid many problems
+ # with removing and replacing existing files. Surely, this
+ # approach also has problems with removal of the given directory.
+ # Problems can occur if we can't remove the directory,
+ # or want to reuse the given directory. It can happen if it
+ # is mounted or (sym-)linked.
+ # Probably, a better solution could be to wipe directory
+ # contents and write new data there. Note that directly doing this
+ # also doesn't work, because images may be needed for writing.
+
+ if not osp.isdir(save_dir):
+ return cls.convert(dataset, save_dir, **options)
+
+ tmpdir = mkdtemp(dir=osp.dirname(save_dir),
+ prefix=osp.basename(save_dir), suffix='.tmp')
+ on_error_do(shutil.rmtree, tmpdir, ignore_errors=True)
+ shutil.copymode(save_dir, tmpdir)
+
+ retval = cls.convert(dataset, tmpdir, **options)
+
+ shutil.rmtree(save_dir)
+ os.replace(tmpdir, save_dir)
+
+ return retval
def apply(self):
raise NotImplementedError("Should be implemented in a subclass")
| {"golden_diff": "diff --git a/datumaro/components/converter.py b/datumaro/components/converter.py\n--- a/datumaro/components/converter.py\n+++ b/datumaro/components/converter.py\n@@ -2,6 +2,7 @@\n #\n # SPDX-License-Identifier: MIT\n \n+from tempfile import mkdtemp\n from typing import Union\n import logging as log\n import os\n@@ -11,6 +12,7 @@\n from datumaro.components.cli_plugin import CliPlugin\n from datumaro.components.dataset import DatasetPatch\n from datumaro.components.extractor import DatasetItem\n+from datumaro.util import error_rollback, on_error_do\n from datumaro.util.image import Image\n \n \n@@ -34,8 +36,34 @@\n return converter.apply()\n \n @classmethod\n+ @error_rollback\n def patch(cls, dataset, patch, save_dir, **options):\n- return cls.convert(dataset, save_dir, **options)\n+ # This solution is not any better in performance than just\n+ # writing a dataset, but in case of patching (i.e. writing\n+ # to the previous location), it allows to avoid many problems\n+ # with removing and replacing existing files. Surely, this\n+ # approach also has problems with removal of the given directory.\n+ # Problems can occur if we can't remove the directory,\n+ # or want to reuse the given directory. It can happen if it\n+ # is mounted or (sym-)linked.\n+ # Probably, a better solution could be to wipe directory\n+ # contents and write new data there. Note that directly doing this\n+ # also doesn't work, because images may be needed for writing.\n+\n+ if not osp.isdir(save_dir):\n+ return cls.convert(dataset, save_dir, **options)\n+\n+ tmpdir = mkdtemp(dir=osp.dirname(save_dir),\n+ prefix=osp.basename(save_dir), suffix='.tmp')\n+ on_error_do(shutil.rmtree, tmpdir, ignore_errors=True)\n+ shutil.copymode(save_dir, tmpdir)\n+\n+ retval = cls.convert(dataset, tmpdir, **options)\n+\n+ shutil.rmtree(save_dir)\n+ os.replace(tmpdir, save_dir)\n+\n+ return retval\n \n def apply(self):\n raise NotImplementedError(\"Should be implemented in a subclass\")\n", "issue": "Dataset patches do not remove empty (e.g. renamed) subsets\nSteps to reproduce:\r\n1. Create a project\r\n2. Import a dataset\r\n3. Rename a subset (e.g. `datum transform -t random_split`)\r\n\r\nDepending on the format, the exported dataset will contain annotations from renamed-from and renamed-to subsets. 
This leads to duplication of annotations in different subsets, which are then found and merged together on importing.\n", "before_files": [{"content": "# Copyright (C) 2019-2021 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom typing import Union\nimport logging as log\nimport os\nimport os.path as osp\nimport shutil\n\nfrom datumaro.components.cli_plugin import CliPlugin\nfrom datumaro.components.dataset import DatasetPatch\nfrom datumaro.components.extractor import DatasetItem\nfrom datumaro.util.image import Image\n\n\nclass Converter(CliPlugin):\n DEFAULT_IMAGE_EXT = None\n\n @classmethod\n def build_cmdline_parser(cls, **kwargs):\n parser = super().build_cmdline_parser(**kwargs)\n parser.add_argument('--save-images', action='store_true',\n help=\"Save images (default: %(default)s)\")\n parser.add_argument('--image-ext', default=None,\n help=\"Image extension (default: keep or use format default%s)\" % \\\n (' ' + cls.DEFAULT_IMAGE_EXT if cls.DEFAULT_IMAGE_EXT else ''))\n\n return parser\n\n @classmethod\n def convert(cls, extractor, save_dir, **options):\n converter = cls(extractor, save_dir, **options)\n return converter.apply()\n\n @classmethod\n def patch(cls, dataset, patch, save_dir, **options):\n return cls.convert(dataset, save_dir, **options)\n\n def apply(self):\n raise NotImplementedError(\"Should be implemented in a subclass\")\n\n def __init__(self, extractor, save_dir, save_images=False,\n image_ext=None, default_image_ext=None):\n default_image_ext = default_image_ext or self.DEFAULT_IMAGE_EXT\n assert default_image_ext\n self._default_image_ext = default_image_ext\n\n self._save_images = save_images\n self._image_ext = image_ext\n\n self._extractor = extractor\n self._save_dir = save_dir\n\n # TODO: refactor this variable.\n # Can be used by a subclass to store the current patch info\n if isinstance(extractor, DatasetPatch.DatasetPatchWrapper):\n self._patch = extractor.patch\n else:\n self._patch = None\n\n def _find_image_ext(self, item: Union[DatasetItem, Image]):\n src_ext = None\n\n if isinstance(item, DatasetItem) and item.has_image:\n src_ext = item.image.ext\n elif isinstance(item, Image):\n src_ext = item.ext\n\n return self._image_ext or src_ext or self._default_image_ext\n\n def _make_item_filename(self, item, *, name=None, subdir=None):\n name = name or item.id\n subdir = subdir or ''\n return osp.join(subdir, name)\n\n def _make_image_filename(self, item, *, name=None, subdir=None):\n return self._make_item_filename(item, name=name, subdir=subdir) + \\\n self._find_image_ext(item)\n\n def _make_pcd_filename(self, item, *, name=None, subdir=None):\n return self._make_item_filename(item, name=name, subdir=subdir) + '.pcd'\n\n def _save_image(self, item, path=None, *,\n name=None, subdir=None, basedir=None):\n assert not ((subdir or name or basedir) and path), \\\n \"Can't use both subdir or name or basedir and path arguments\"\n\n if not item.has_image or not item.image.has_data:\n log.warning(\"Item '%s' has no image\", item.id)\n return\n\n basedir = basedir or self._save_dir\n path = path or osp.join(basedir,\n self._make_image_filename(item, name=name, subdir=subdir))\n path = osp.abspath(path)\n\n item.image.save(path)\n\n def _save_point_cloud(self, item=None, path=None, *,\n name=None, subdir=None, basedir=None):\n assert not ((subdir or name or basedir) and path), \\\n \"Can't use both subdir or name or basedir and path arguments\"\n\n if not item.point_cloud:\n log.warning(\"Item '%s' has no pcd\", item.id)\n return\n\n basedir = basedir 
or self._save_dir\n path = path or osp.join(basedir,\n self._make_pcd_filename(item, name=name, subdir=subdir))\n path = osp.abspath(path)\n\n os.makedirs(osp.dirname(path), exist_ok=True)\n if item.point_cloud and osp.isfile(item.point_cloud):\n if item.point_cloud != path:\n shutil.copyfile(item.point_cloud, path)\n", "path": "datumaro/components/converter.py"}], "after_files": [{"content": "# Copyright (C) 2019-2021 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom tempfile import mkdtemp\nfrom typing import Union\nimport logging as log\nimport os\nimport os.path as osp\nimport shutil\n\nfrom datumaro.components.cli_plugin import CliPlugin\nfrom datumaro.components.dataset import DatasetPatch\nfrom datumaro.components.extractor import DatasetItem\nfrom datumaro.util import error_rollback, on_error_do\nfrom datumaro.util.image import Image\n\n\nclass Converter(CliPlugin):\n DEFAULT_IMAGE_EXT = None\n\n @classmethod\n def build_cmdline_parser(cls, **kwargs):\n parser = super().build_cmdline_parser(**kwargs)\n parser.add_argument('--save-images', action='store_true',\n help=\"Save images (default: %(default)s)\")\n parser.add_argument('--image-ext', default=None,\n help=\"Image extension (default: keep or use format default%s)\" % \\\n (' ' + cls.DEFAULT_IMAGE_EXT if cls.DEFAULT_IMAGE_EXT else ''))\n\n return parser\n\n @classmethod\n def convert(cls, extractor, save_dir, **options):\n converter = cls(extractor, save_dir, **options)\n return converter.apply()\n\n @classmethod\n @error_rollback\n def patch(cls, dataset, patch, save_dir, **options):\n # This solution is not any better in performance than just\n # writing a dataset, but in case of patching (i.e. writing\n # to the previous location), it allows to avoid many problems\n # with removing and replacing existing files. Surely, this\n # approach also has problems with removal of the given directory.\n # Problems can occur if we can't remove the directory,\n # or want to reuse the given directory. It can happen if it\n # is mounted or (sym-)linked.\n # Probably, a better solution could be to wipe directory\n # contents and write new data there. 
Note that directly doing this\n # also doesn't work, because images may be needed for writing.\n\n if not osp.isdir(save_dir):\n return cls.convert(dataset, save_dir, **options)\n\n tmpdir = mkdtemp(dir=osp.dirname(save_dir),\n prefix=osp.basename(save_dir), suffix='.tmp')\n on_error_do(shutil.rmtree, tmpdir, ignore_errors=True)\n shutil.copymode(save_dir, tmpdir)\n\n retval = cls.convert(dataset, tmpdir, **options)\n\n shutil.rmtree(save_dir)\n os.replace(tmpdir, save_dir)\n\n return retval\n\n def apply(self):\n raise NotImplementedError(\"Should be implemented in a subclass\")\n\n def __init__(self, extractor, save_dir, save_images=False,\n image_ext=None, default_image_ext=None):\n default_image_ext = default_image_ext or self.DEFAULT_IMAGE_EXT\n assert default_image_ext\n self._default_image_ext = default_image_ext\n\n self._save_images = save_images\n self._image_ext = image_ext\n\n self._extractor = extractor\n self._save_dir = save_dir\n\n # TODO: refactor this variable.\n # Can be used by a subclass to store the current patch info\n if isinstance(extractor, DatasetPatch.DatasetPatchWrapper):\n self._patch = extractor.patch\n else:\n self._patch = None\n\n def _find_image_ext(self, item: Union[DatasetItem, Image]):\n src_ext = None\n\n if isinstance(item, DatasetItem) and item.has_image:\n src_ext = item.image.ext\n elif isinstance(item, Image):\n src_ext = item.ext\n\n return self._image_ext or src_ext or self._default_image_ext\n\n def _make_item_filename(self, item, *, name=None, subdir=None):\n name = name or item.id\n subdir = subdir or ''\n return osp.join(subdir, name)\n\n def _make_image_filename(self, item, *, name=None, subdir=None):\n return self._make_item_filename(item, name=name, subdir=subdir) + \\\n self._find_image_ext(item)\n\n def _make_pcd_filename(self, item, *, name=None, subdir=None):\n return self._make_item_filename(item, name=name, subdir=subdir) + '.pcd'\n\n def _save_image(self, item, path=None, *,\n name=None, subdir=None, basedir=None):\n assert not ((subdir or name or basedir) and path), \\\n \"Can't use both subdir or name or basedir and path arguments\"\n\n if not item.has_image or not item.image.has_data:\n log.warning(\"Item '%s' has no image\", item.id)\n return\n\n basedir = basedir or self._save_dir\n path = path or osp.join(basedir,\n self._make_image_filename(item, name=name, subdir=subdir))\n path = osp.abspath(path)\n\n item.image.save(path)\n\n def _save_point_cloud(self, item=None, path=None, *,\n name=None, subdir=None, basedir=None):\n assert not ((subdir or name or basedir) and path), \\\n \"Can't use both subdir or name or basedir and path arguments\"\n\n if not item.point_cloud:\n log.warning(\"Item '%s' has no pcd\", item.id)\n return\n\n basedir = basedir or self._save_dir\n path = path or osp.join(basedir,\n self._make_pcd_filename(item, name=name, subdir=subdir))\n path = osp.abspath(path)\n\n os.makedirs(osp.dirname(path), exist_ok=True)\n if item.point_cloud and osp.isfile(item.point_cloud):\n if item.point_cloud != path:\n shutil.copyfile(item.point_cloud, path)\n", "path": "datumaro/components/converter.py"}]} | 1,551 | 499 |
gh_patches_debug_27089 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1613 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pass calculator options through `pyhf.infer.upperlimit` (toys)
# Description
Currently there's no easy way to pass custom options to upperlimit so it'll always call asymptotics
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/infer/intervals.py`
Content:
```
1 """Interval estimation"""
2 from pyhf.infer import hypotest
3 from pyhf import get_backend
4 import numpy as np
5
6 __all__ = ["upperlimit"]
7
8
9 def __dir__():
10 return __all__
11
12
13 def _interp(x, xp, fp):
14 tb, _ = get_backend()
15 return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))
16
17
18 def upperlimit(data, model, scan, level=0.05, return_results=False):
19 """
20 Calculate an upper limit interval ``(0, poi_up)`` for a single
21 Parameter of Interest (POI) using a fixed scan through POI-space.
22
23 Example:
24 >>> import numpy as np
25 >>> import pyhf
26 >>> pyhf.set_backend("numpy")
27 >>> model = pyhf.simplemodels.uncorrelated_background(
28 ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
29 ... )
30 >>> observations = [51, 48]
31 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
32 >>> scan = np.linspace(0, 5, 21)
33 >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upperlimit(
34 ... data, model, scan, return_results=True
35 ... )
36 >>> obs_limit
37 array(1.01764089)
38 >>> exp_limits
39 [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]
40
41 Args:
42 data (:obj:`tensor`): The observed data.
43 model (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.
44 scan (:obj:`iterable`): Iterable of POI values.
45 level (:obj:`float`): The threshold value to evaluate the interpolated results at.
46 return_results (:obj:`bool`): Whether to return the per-point results.
47
48 Returns:
49 Tuple of Tensors:
50
51 - Tensor: The observed upper limit on the POI.
52 - Tensor: The expected upper limits on the POI.
53 - Tuple of Tensors: The given ``scan`` along with the
54 :class:`~pyhf.infer.hypotest` results at each test POI.
55 Only returned when ``return_results`` is ``True``.
56 """
57 tb, _ = get_backend()
58 results = [
59 hypotest(mu, data, model, test_stat="qtilde", return_expected_set=True)
60 for mu in scan
61 ]
62 obs = tb.astensor([[r[0]] for r in results])
63 exp = tb.astensor([[r[1][idx] for idx in range(5)] for r in results])
64
65 result_arrary = tb.concatenate([obs, exp], axis=1).T
66
67 # observed limit and the (0, +-1, +-2)sigma expected limits
68 limits = [_interp(level, result_arrary[idx][::-1], scan[::-1]) for idx in range(6)]
69 obs_limit, exp_limits = limits[0], limits[1:]
70
71 if return_results:
72 return obs_limit, exp_limits, (scan, results)
73 return obs_limit, exp_limits
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/infer/intervals.py b/src/pyhf/infer/intervals.py
--- a/src/pyhf/infer/intervals.py
+++ b/src/pyhf/infer/intervals.py
@@ -15,7 +15,7 @@
return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))
-def upperlimit(data, model, scan, level=0.05, return_results=False):
+def upperlimit(data, model, scan, level=0.05, return_results=False, **hypotest_kwargs):
"""
Calculate an upper limit interval ``(0, poi_up)`` for a single
Parameter of Interest (POI) using a fixed scan through POI-space.
@@ -44,6 +44,8 @@
scan (:obj:`iterable`): Iterable of POI values.
level (:obj:`float`): The threshold value to evaluate the interpolated results at.
return_results (:obj:`bool`): Whether to return the per-point results.
+ hypotest_kwargs (:obj:`string`): Kwargs for the calls to
+ :class:`~pyhf.infer.hypotest` to configure the fits.
Returns:
Tuple of Tensors:
@@ -56,7 +58,7 @@
"""
tb, _ = get_backend()
results = [
- hypotest(mu, data, model, test_stat="qtilde", return_expected_set=True)
+ hypotest(mu, data, model, return_expected_set=True, **hypotest_kwargs)
for mu in scan
]
obs = tb.astensor([[r[0]] for r in results])
| {"golden_diff": "diff --git a/src/pyhf/infer/intervals.py b/src/pyhf/infer/intervals.py\n--- a/src/pyhf/infer/intervals.py\n+++ b/src/pyhf/infer/intervals.py\n@@ -15,7 +15,7 @@\n return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))\n \n \n-def upperlimit(data, model, scan, level=0.05, return_results=False):\n+def upperlimit(data, model, scan, level=0.05, return_results=False, **hypotest_kwargs):\n \"\"\"\n Calculate an upper limit interval ``(0, poi_up)`` for a single\n Parameter of Interest (POI) using a fixed scan through POI-space.\n@@ -44,6 +44,8 @@\n scan (:obj:`iterable`): Iterable of POI values.\n level (:obj:`float`): The threshold value to evaluate the interpolated results at.\n return_results (:obj:`bool`): Whether to return the per-point results.\n+ hypotest_kwargs (:obj:`string`): Kwargs for the calls to\n+ :class:`~pyhf.infer.hypotest` to configure the fits.\n \n Returns:\n Tuple of Tensors:\n@@ -56,7 +58,7 @@\n \"\"\"\n tb, _ = get_backend()\n results = [\n- hypotest(mu, data, model, test_stat=\"qtilde\", return_expected_set=True)\n+ hypotest(mu, data, model, return_expected_set=True, **hypotest_kwargs)\n for mu in scan\n ]\n obs = tb.astensor([[r[0]] for r in results])\n", "issue": "pass calculator options through `pyhf.infer.upperlimit` (toys)\n# Description\r\n\r\nCurrently there's no easy way to pass custom options to upperlimit so it'll always acll asymptotics\n", "before_files": [{"content": "\"\"\"Interval estimation\"\"\"\nfrom pyhf.infer import hypotest\nfrom pyhf import get_backend\nimport numpy as np\n\n__all__ = [\"upperlimit\"]\n\n\ndef __dir__():\n return __all__\n\n\ndef _interp(x, xp, fp):\n tb, _ = get_backend()\n return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))\n\n\ndef upperlimit(data, model, scan, level=0.05, return_results=False):\n \"\"\"\n Calculate an upper limit interval ``(0, poi_up)`` for a single\n Parameter of Interest (POI) using a fixed scan through POI-space.\n\n Example:\n >>> import numpy as np\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.uncorrelated_background(\n ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> scan = np.linspace(0, 5, 21)\n >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upperlimit(\n ... data, model, scan, return_results=True\n ... 
)\n >>> obs_limit\n array(1.01764089)\n >>> exp_limits\n [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]\n\n Args:\n data (:obj:`tensor`): The observed data.\n model (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.\n scan (:obj:`iterable`): Iterable of POI values.\n level (:obj:`float`): The threshold value to evaluate the interpolated results at.\n return_results (:obj:`bool`): Whether to return the per-point results.\n\n Returns:\n Tuple of Tensors:\n\n - Tensor: The observed upper limit on the POI.\n - Tensor: The expected upper limits on the POI.\n - Tuple of Tensors: The given ``scan`` along with the\n :class:`~pyhf.infer.hypotest` results at each test POI.\n Only returned when ``return_results`` is ``True``.\n \"\"\"\n tb, _ = get_backend()\n results = [\n hypotest(mu, data, model, test_stat=\"qtilde\", return_expected_set=True)\n for mu in scan\n ]\n obs = tb.astensor([[r[0]] for r in results])\n exp = tb.astensor([[r[1][idx] for idx in range(5)] for r in results])\n\n result_arrary = tb.concatenate([obs, exp], axis=1).T\n\n # observed limit and the (0, +-1, +-2)sigma expected limits\n limits = [_interp(level, result_arrary[idx][::-1], scan[::-1]) for idx in range(6)]\n obs_limit, exp_limits = limits[0], limits[1:]\n\n if return_results:\n return obs_limit, exp_limits, (scan, results)\n return obs_limit, exp_limits\n", "path": "src/pyhf/infer/intervals.py"}], "after_files": [{"content": "\"\"\"Interval estimation\"\"\"\nfrom pyhf.infer import hypotest\nfrom pyhf import get_backend\nimport numpy as np\n\n__all__ = [\"upperlimit\"]\n\n\ndef __dir__():\n return __all__\n\n\ndef _interp(x, xp, fp):\n tb, _ = get_backend()\n return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))\n\n\ndef upperlimit(data, model, scan, level=0.05, return_results=False, **hypotest_kwargs):\n \"\"\"\n Calculate an upper limit interval ``(0, poi_up)`` for a single\n Parameter of Interest (POI) using a fixed scan through POI-space.\n\n Example:\n >>> import numpy as np\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.uncorrelated_background(\n ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> scan = np.linspace(0, 5, 21)\n >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upperlimit(\n ... data, model, scan, return_results=True\n ... 
)\n >>> obs_limit\n array(1.01764089)\n >>> exp_limits\n [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]\n\n Args:\n data (:obj:`tensor`): The observed data.\n model (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.\n scan (:obj:`iterable`): Iterable of POI values.\n level (:obj:`float`): The threshold value to evaluate the interpolated results at.\n return_results (:obj:`bool`): Whether to return the per-point results.\n hypotest_kwargs (:obj:`string`): Kwargs for the calls to\n :class:`~pyhf.infer.hypotest` to configure the fits.\n\n Returns:\n Tuple of Tensors:\n\n - Tensor: The observed upper limit on the POI.\n - Tensor: The expected upper limits on the POI.\n - Tuple of Tensors: The given ``scan`` along with the\n :class:`~pyhf.infer.hypotest` results at each test POI.\n Only returned when ``return_results`` is ``True``.\n \"\"\"\n tb, _ = get_backend()\n results = [\n hypotest(mu, data, model, return_expected_set=True, **hypotest_kwargs)\n for mu in scan\n ]\n obs = tb.astensor([[r[0]] for r in results])\n exp = tb.astensor([[r[1][idx] for idx in range(5)] for r in results])\n\n result_arrary = tb.concatenate([obs, exp], axis=1).T\n\n # observed limit and the (0, +-1, +-2)sigma expected limits\n limits = [_interp(level, result_arrary[idx][::-1], scan[::-1]) for idx in range(6)]\n obs_limit, exp_limits = limits[0], limits[1:]\n\n if return_results:\n return obs_limit, exp_limits, (scan, results)\n return obs_limit, exp_limits\n", "path": "src/pyhf/infer/intervals.py"}]} | 1,211 | 361 |
gh_patches_debug_35293 | rasdani/github-patches | git_diff | beeware__toga-1069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash of ImageView example in Gtk
**Describe the bug**
The `imageview` example crashes on Gtk because `toga_gtk.ImageView.rehint()` is called before `toga_gtk.ImageView._pixbuf` has been set by the interface layer. The following traceback is produced:
```
Traceback (most recent call last):
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/app.py", line 93, in gtk_startup
self.interface.startup()
File "/media/psf/Home/Python/toga/examples/imageview/imageview/app.py", line 18, in startup
imageview_from_path = toga.ImageView(image_from_path)
File "/home/samschott/.local/lib/python3.8/site-packages/toga/widgets/imageview.py", line 25, in __init__
self._impl = self.factory.ImageView(interface=self)
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/base.py", line 12, in __init__
self.interface.style.reapply()
File "/home/samschott/.local/lib/python3.8/site-packages/travertino/declaration.py", line 88, in reapply
self.apply(style, getattr(self, style))
File "/home/samschott/.local/lib/python3.8/site-packages/toga/style/pack.py", line 104, in apply
self._applicator.set_font(
File "/home/samschott/.local/lib/python3.8/site-packages/toga/style/applicator.py", line 25, in set_font
self.widget._impl.rehint()
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/imageview.py", line 20, in rehint
original_height=self._pixbuf.get_height(),
AttributeError: 'NoneType' object has no attribute 'get_height'
```
**To Reproduce**
Run the imageview example:
```shell
python3 -m imageview
```
**Environment:**
- Operating System: Ubuntu 20.04
- Python version: Python 3.8
- Software versions:
- Toga: 0.3.0.dev23
**Additional context**
This is a tricky issue and I suspect it was introduced by a change to when the style is applied. Essentially, the interface does set image (pixbuf) during init. Nevertheless, the style is already applied during the init of `toga_gtk.base.Widget`, before setting the image (line 12):
https://github.com/beeware/toga/blob/f8bea583c87642ad102776e1b58fd8bb9265b135/src/gtk/toga_gtk/widgets/base.py#L5-L12
The quickest solution may be to guard against `pixbuf` not being set in the `rehint` implementation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gtk/toga_gtk/widgets/imageview.py`
Content:
```
1
2 from ..libs import GdkPixbuf, Gtk
3 from .base import Widget
4
5
6 class ImageView(Widget):
7
8 def create(self):
9 self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
10 self._image = Gtk.Image()
11 self._pixbuf = None
12 self.native.add(self._image)
13 self.native.interface = self.interface
14
15 def set_image(self, image):
16 self._pixbuf = image._impl.native
17
18 def rehint(self):
19 height, width = self._resize_max(
20 original_height=self._pixbuf.get_height(),
21 original_width=self._pixbuf.get_width(),
22 max_height=self.native.get_allocated_height(),
23 max_width=self.native.get_allocated_width()
24 )
25
26 scaled_pixbuf = self._pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)
27 self._image.set_from_pixbuf(scaled_pixbuf)
28
29 @staticmethod
30 def _resize_max(original_height, original_width, max_height, max_width):
31
32 # Check to make sure all dimensions have valid sizes
33 if min(original_height, original_width, max_height, max_width) <= 0:
34 return 1, 1
35
36 width_ratio = max_width/original_width
37 height_ratio = max_height/original_height
38
39 height = original_height * width_ratio
40 if height <= max_height:
41 width = original_width * width_ratio
42 else:
43 height = original_height * height_ratio
44 width = original_width * height_ratio
45
46 return int(height), int(width)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py
--- a/src/gtk/toga_gtk/widgets/imageview.py
+++ b/src/gtk/toga_gtk/widgets/imageview.py
@@ -1,10 +1,8 @@
-
-from ..libs import GdkPixbuf, Gtk
+from ..libs import GdkPixbuf, Gtk, Gdk
from .base import Widget
class ImageView(Widget):
-
def create(self):
self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self._image = Gtk.Image()
@@ -15,16 +13,30 @@
def set_image(self, image):
self._pixbuf = image._impl.native
+ def set_bounds(self, x, y, width, height):
+ super().set_bounds(x, y, width, height)
+ # rehint to update scaling of pixbuf
+ self.rehint()
+
def rehint(self):
- height, width = self._resize_max(
- original_height=self._pixbuf.get_height(),
- original_width=self._pixbuf.get_width(),
- max_height=self.native.get_allocated_height(),
- max_width=self.native.get_allocated_width()
- )
+ if self._pixbuf:
+ height, width = self._resize_max(
+ original_height=self._pixbuf.get_height(),
+ original_width=self._pixbuf.get_width(),
+ max_height=self.native.get_allocated_height(),
+ max_width=self.native.get_allocated_width(),
+ )
+
+ dpr = self.native.get_scale_factor()
+
+ scaled_pixbuf = self._pixbuf.scale_simple(
+ width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR
+ )
- scaled_pixbuf = self._pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)
- self._image.set_from_pixbuf(scaled_pixbuf)
+ surface = Gdk.cairo_surface_create_from_pixbuf(
+ scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window
+ )
+ self._image.set_from_surface(surface)
@staticmethod
def _resize_max(original_height, original_width, max_height, max_width):
@@ -33,8 +45,8 @@
if min(original_height, original_width, max_height, max_width) <= 0:
return 1, 1
- width_ratio = max_width/original_width
- height_ratio = max_height/original_height
+ width_ratio = max_width / original_width
+ height_ratio = max_height / original_height
height = original_height * width_ratio
if height <= max_height:
| {"golden_diff": "diff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py\n--- a/src/gtk/toga_gtk/widgets/imageview.py\n+++ b/src/gtk/toga_gtk/widgets/imageview.py\n@@ -1,10 +1,8 @@\n-\n-from ..libs import GdkPixbuf, Gtk\n+from ..libs import GdkPixbuf, Gtk, Gdk\n from .base import Widget\n \n \n class ImageView(Widget):\n-\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n@@ -15,16 +13,30 @@\n def set_image(self, image):\n self._pixbuf = image._impl.native\n \n+ def set_bounds(self, x, y, width, height):\n+ super().set_bounds(x, y, width, height)\n+ # rehint to update scaling of pixbuf\n+ self.rehint()\n+\n def rehint(self):\n- height, width = self._resize_max(\n- original_height=self._pixbuf.get_height(),\n- original_width=self._pixbuf.get_width(),\n- max_height=self.native.get_allocated_height(),\n- max_width=self.native.get_allocated_width()\n- )\n+ if self._pixbuf:\n+ height, width = self._resize_max(\n+ original_height=self._pixbuf.get_height(),\n+ original_width=self._pixbuf.get_width(),\n+ max_height=self.native.get_allocated_height(),\n+ max_width=self.native.get_allocated_width(),\n+ )\n+\n+ dpr = self.native.get_scale_factor()\n+\n+ scaled_pixbuf = self._pixbuf.scale_simple(\n+ width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n+ )\n \n- scaled_pixbuf = self._pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)\n- self._image.set_from_pixbuf(scaled_pixbuf)\n+ surface = Gdk.cairo_surface_create_from_pixbuf(\n+ scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n+ )\n+ self._image.set_from_surface(surface)\n \n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n@@ -33,8 +45,8 @@\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n \n- width_ratio = max_width/original_width\n- height_ratio = max_height/original_height\n+ width_ratio = max_width / original_width\n+ height_ratio = max_height / original_height\n \n height = original_height * width_ratio\n if height <= max_height:\n", "issue": "Crash of ImageView example in Gtk\n**Describe the bug**\r\nThe `imageview` example crashes on Gtk because `toga_gtk.ImageView.rehint()` is called before `toga_gtk.ImageView._pixbuf` has been set by the interface layer. 
The following traceback is produced:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/app.py\", line 93, in gtk_startup\r\n self.interface.startup()\r\n File \"/media/psf/Home/Python/toga/examples/imageview/imageview/app.py\", line 18, in startup\r\n imageview_from_path = toga.ImageView(image_from_path)\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga/widgets/imageview.py\", line 25, in __init__\r\n self._impl = self.factory.ImageView(interface=self)\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/base.py\", line 12, in __init__\r\n self.interface.style.reapply()\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/travertino/declaration.py\", line 88, in reapply\r\n self.apply(style, getattr(self, style))\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga/style/pack.py\", line 104, in apply\r\n self._applicator.set_font(\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga/style/applicator.py\", line 25, in set_font\r\n self.widget._impl.rehint()\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/imageview.py\", line 20, in rehint\r\n original_height=self._pixbuf.get_height(),\r\nAttributeError: 'NoneType' object has no attribute 'get_height'\r\n```\r\n\r\n**To Reproduce**\r\nRun the imageview example:\r\n```shell\r\npython3 -m imageview\r\n```\r\n\r\n**Environment:**\r\n - Operating System: Ubuntu 20.04\r\n - Python version: Python 3.8\r\n - Software versions:\r\n - Toga: 0.3.0.dev23\r\n\r\n**Additional context**\r\nThis is a tricky issue and I suspect it was introduced by a change to when the style is applied. Essentially, the interface does set image (pixbuf) during init. 
Nevertheless, the style is already applied during the init of `toga_gtk.base.Widget`, before setting the image (line 12):\r\n\r\nhttps://github.com/beeware/toga/blob/f8bea583c87642ad102776e1b58fd8bb9265b135/src/gtk/toga_gtk/widgets/base.py#L5-L12\r\n\r\nThe quickest solution may be to guard against `pixbuf` not being set in the `rehint` implementation.\n", "before_files": [{"content": "\nfrom ..libs import GdkPixbuf, Gtk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def rehint(self):\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width()\n )\n\n scaled_pixbuf = self._pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)\n self._image.set_from_pixbuf(scaled_pixbuf)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width/original_width\n height_ratio = max_height/original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n return int(height), int(width)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}], "after_files": [{"content": "from ..libs import GdkPixbuf, Gtk, Gdk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def set_bounds(self, x, y, width, height):\n super().set_bounds(x, y, width, height)\n # rehint to update scaling of pixbuf\n self.rehint()\n\n def rehint(self):\n if self._pixbuf:\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width(),\n )\n\n dpr = self.native.get_scale_factor()\n\n scaled_pixbuf = self._pixbuf.scale_simple(\n width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n )\n\n surface = Gdk.cairo_surface_create_from_pixbuf(\n scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n )\n self._image.set_from_surface(surface)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width / original_width\n height_ratio = max_height / original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n return int(height), int(width)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}]} | 1,322 | 599 |
gh_patches_debug_11104 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-1322 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid `dev` version identifiers in `setup.py`
There is a bunch of ~invalid~ version matchers (edit: valid, but not parsed correctly by distlib) in `setup.py`. [PEP 440](https://peps.python.org/pep-0440/) states:
> The canonical public version identifiers MUST comply with the following scheme:
> `[N!]N(.N)*[{a|b|rc}N][.postN][.devN]`
So you are missing a dot and a number in every version identifier that contains the string `dev`.
It is also considered bad practice to have an upper bound on package versions, and installers like pip do not typically consider development versions in any case (unless explicitly told to).
See: https://github.com/googleapis/google-api-python-client/issues/2151
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 from setuptools import find_packages
19 from setuptools import setup
20
21
22 DEPENDENCIES = (
23 "cachetools>=2.0.0,<6.0",
24 "pyasn1-modules>=0.2.1",
25 # rsa==4.5 is the last version to support 2.7
26 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
27 "rsa>=3.1.4,<5",
28 # install enum34 to support 2.7. enum34 only works up to python version 3.3.
29 "six>=1.9.0",
30 "urllib3<2.0",
31 )
32
33 extras = {
34 "aiohttp": ["aiohttp >= 3.6.2, < 4.0.0dev", "requests >= 2.20.0, < 3.0.0dev"],
35 "pyopenssl": ["pyopenssl>=20.0.0", "cryptography>=38.0.3"],
36 "requests": "requests >= 2.20.0, < 3.0.0dev",
37 "reauth": "pyu2f>=0.1.5",
38 # Enterprise cert only works for OpenSSL 1.1.1. Newer versions of these
39 # dependencies are built with OpenSSL 3.0 so we need to fix the version.
40 "enterprise_cert": ["cryptography==36.0.2", "pyopenssl==22.0.0"],
41 }
42
43 with io.open("README.rst", "r") as fh:
44 long_description = fh.read()
45
46 package_root = os.path.abspath(os.path.dirname(__file__))
47
48 version = {}
49 with open(os.path.join(package_root, "google/auth/version.py")) as fp:
50 exec(fp.read(), version)
51 version = version["__version__"]
52
53 setup(
54 name="google-auth",
55 version=version,
56 author="Google Cloud Platform",
57 author_email="[email protected]",
58 description="Google Authentication Library",
59 long_description=long_description,
60 url="https://github.com/googleapis/google-auth-library-python",
61 packages=find_packages(exclude=("tests*", "system_tests*")),
62 namespace_packages=("google",),
63 install_requires=DEPENDENCIES,
64 extras_require=extras,
65 python_requires=">=3.6",
66 license="Apache 2.0",
67 keywords="google auth oauth client",
68 classifiers=[
69 "Programming Language :: Python :: 3",
70 "Programming Language :: Python :: 3.6",
71 "Programming Language :: Python :: 3.7",
72 "Programming Language :: Python :: 3.8",
73 "Programming Language :: Python :: 3.9",
74 "Programming Language :: Python :: 3.10",
75 "Programming Language :: Python :: 3.11",
76 "Development Status :: 5 - Production/Stable",
77 "Intended Audience :: Developers",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: POSIX",
80 "Operating System :: Microsoft :: Windows",
81 "Operating System :: MacOS :: MacOS X",
82 "Operating System :: OS Independent",
83 "Topic :: Internet :: WWW/HTTP",
84 ],
85 )
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,9 +31,9 @@
)
extras = {
- "aiohttp": ["aiohttp >= 3.6.2, < 4.0.0dev", "requests >= 2.20.0, < 3.0.0dev"],
+ "aiohttp": ["aiohttp >= 3.6.2, < 4.0.0.dev0", "requests >= 2.20.0, < 3.0.0.dev0"],
"pyopenssl": ["pyopenssl>=20.0.0", "cryptography>=38.0.3"],
- "requests": "requests >= 2.20.0, < 3.0.0dev",
+ "requests": "requests >= 2.20.0, < 3.0.0.dev0",
"reauth": "pyu2f>=0.1.5",
# Enterprise cert only works for OpenSSL 1.1.1. Newer versions of these
# dependencies are built with OpenSSL 3.0 so we need to fix the version.
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,9 +31,9 @@\n )\n \n extras = {\n- \"aiohttp\": [\"aiohttp >= 3.6.2, < 4.0.0dev\", \"requests >= 2.20.0, < 3.0.0dev\"],\n+ \"aiohttp\": [\"aiohttp >= 3.6.2, < 4.0.0.dev0\", \"requests >= 2.20.0, < 3.0.0.dev0\"],\n \"pyopenssl\": [\"pyopenssl>=20.0.0\", \"cryptography>=38.0.3\"],\n- \"requests\": \"requests >= 2.20.0, < 3.0.0dev\",\n+ \"requests\": \"requests >= 2.20.0, < 3.0.0.dev0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n # Enterprise cert only works for OpenSSL 1.1.1. Newer versions of these\n # dependencies are built with OpenSSL 3.0 so we need to fix the version.\n", "issue": "Invalid `dev` version identifiers in `setup.py`\nThere is a bunch of ~invalid~ version matchers (edit: valid, but not parsed correctly by distlib) in `setup.py`. [PEP 440](https://peps.python.org/pep-0440/) states:\r\n\r\n> The canonical public version identifiers MUST comply with the following scheme:\r\n> `[N!]N(.N)*[{a|b|rc}N][.postN][.devN]`\r\n\r\nSo you are missing a dot and a number in every version identifier that contains the string `dev`.\r\n\r\nIt is also considered bad practice to have an upper bound on package versions and installers like pip do not typically consider development versions in any case (unless explicitly told to).\r\n\r\nSee: https://github.com/googleapis/google-api-python-client/issues/2151\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<6.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n \"rsa>=3.1.4,<5\",\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n \"six>=1.9.0\",\n \"urllib3<2.0\",\n)\n\nextras = {\n \"aiohttp\": [\"aiohttp >= 3.6.2, < 4.0.0dev\", \"requests >= 2.20.0, < 3.0.0dev\"],\n \"pyopenssl\": [\"pyopenssl>=20.0.0\", \"cryptography>=38.0.3\"],\n \"requests\": \"requests >= 2.20.0, < 3.0.0dev\",\n \"reauth\": \"pyu2f>=0.1.5\",\n # Enterprise cert only works for OpenSSL 1.1.1. 
Newer versions of these\n # dependencies are built with OpenSSL 3.0 so we need to fix the version.\n \"enterprise_cert\": [\"cryptography==36.0.2\", \"pyopenssl==22.0.0\"],\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=3.6\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<6.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n \"rsa>=3.1.4,<5\",\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n \"six>=1.9.0\",\n \"urllib3<2.0\",\n)\n\nextras = {\n \"aiohttp\": [\"aiohttp >= 3.6.2, < 4.0.0.dev0\", \"requests >= 2.20.0, < 3.0.0.dev0\"],\n \"pyopenssl\": [\"pyopenssl>=20.0.0\", \"cryptography>=38.0.3\"],\n \"requests\": \"requests >= 2.20.0, < 3.0.0.dev0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n # Enterprise cert only works for OpenSSL 1.1.1. 
Newer versions of these\n # dependencies are built with OpenSSL 3.0 so we need to fix the version.\n \"enterprise_cert\": [\"cryptography==36.0.2\", \"pyopenssl==22.0.0\"],\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=3.6\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 1,440 | 270 |
gh_patches_debug_16452 | rasdani/github-patches | git_diff | iterative__dvc-3169 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`dvc pull` with wrong S3 remote failed but user wasn't informed
System
----------
My `dvc` version is 0.77.3; I installed & upgraded `dvc` through `pip3`, and I'm using Fedora 31.
Problem
-----------
I followed the 'Getting Started' guide and ran the following commands
```
$ dvc init
$ git add .dvc/* && git commit -m 'Initialized dvc'
$ dvc remote add -d s3remote https://s3.amazonaws.com/<bucket-name>
$ dvc add <some-file>
$ git add .gitignore <some-file>.dvc && git commit -m 'Added <some-file'
$ dvc push -r s3remote
```
The last command finished with `Everything is up to date.`, which is weird since the S3 bucket was empty when I checked it. I went on to delete `<some-file>` and run `dvc pull -r s3remote`, which restored `<some-file>` without throwing any errors. This was surprising since it was not clear where `dvc` was storing my file backups and why my S3 bucket was still empty without `dvc` communicating any errors.
Only when I removed the `.dvc/cache` folder and ran `dvc pull -r s3remote` did it complain with the following:
```
ERROR: failed to download 'https://s3.amazonaws.com/<bucket-name>/31/69f7ce4ebb503afca037d35b7eb3a9' to '.dvc/cache/31/69f7ce4ebb503afca037d35b7eb3a9' - '301 Moved Permanently'
ERROR: failed to download 'https://s3.amazonaws.com/<bucket-name>/a3/04afb96060aad90176268345e10355' to '.dvc/cache/a3/04afb96060aad90176268345e10355' - '301 Moved Permanently'
ERROR: failed to pull data from the cloud - 2 files failed to download
Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help!
```
The fix
---------
On my end, it was an easy fix. After consulting the documentation in more detail, I realized that I had set up my S3 remote incorrectly, using the `https://s3.amazonaws.com/<bucket-name>` URL for the bucket whereas I should have used the `s3://<bucket-name>` URL. Hence, running
```
$ dvc remote modify s3remote url s3://<bucket-name>
```
did the job.
It still seems weird that I was never warned about the remote being incorrectly set up, and that `dvc pull -r s3remote` worked without any problems even though there were no files in the S3 bucket (it seems that it restored them from the local cache as a fall-back), but it should have given me a heads-up!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/remote/http.py`
Content:
```
1 import logging
2 import threading
3
4 from funcy import cached_property, wrap_prop
5
6 from dvc.config import Config
7 from dvc.config import ConfigError
8 from dvc.exceptions import DvcException, HTTPError
9 from dvc.progress import Tqdm
10 from dvc.remote.base import RemoteBASE
11 from dvc.scheme import Schemes
12
13 logger = logging.getLogger(__name__)
14
15
16 class RemoteHTTP(RemoteBASE):
17 scheme = Schemes.HTTP
18 SESSION_RETRIES = 5
19 SESSION_BACKOFF_FACTOR = 0.1
20 REQUEST_TIMEOUT = 10
21 CHUNK_SIZE = 2 ** 16
22 PARAM_CHECKSUM = "etag"
23
24 def __init__(self, repo, config):
25 super().__init__(repo, config)
26
27 url = config.get(Config.SECTION_REMOTE_URL)
28 self.path_info = self.path_cls(url) if url else None
29
30 if not self.no_traverse:
31 raise ConfigError(
32 "HTTP doesn't support traversing the remote to list existing "
33 "files. Use: `dvc remote modify <name> no_traverse true`"
34 )
35
36 def _download(self, from_info, to_file, name=None, no_progress_bar=False):
37 response = self._request("GET", from_info.url, stream=True)
38 if response.status_code != 200:
39 raise HTTPError(response.status_code, response.reason)
40 with Tqdm(
41 total=None if no_progress_bar else self._content_length(response),
42 leave=False,
43 bytes=True,
44 desc=from_info.url if name is None else name,
45 disable=no_progress_bar,
46 ) as pbar:
47 with open(to_file, "wb") as fd:
48 for chunk in response.iter_content(chunk_size=self.CHUNK_SIZE):
49 fd.write(chunk)
50 pbar.update(len(chunk))
51
52 def exists(self, path_info):
53 return bool(self._request("HEAD", path_info.url))
54
55 def _content_length(self, response):
56 res = response.headers.get("Content-Length")
57 return int(res) if res else None
58
59 def get_file_checksum(self, path_info):
60 url = path_info.url
61 headers = self._request("HEAD", url).headers
62 etag = headers.get("ETag") or headers.get("Content-MD5")
63
64 if not etag:
65 raise DvcException(
66 "could not find an ETag or "
67 "Content-MD5 header for '{url}'".format(url=url)
68 )
69
70 if etag.startswith("W/"):
71 raise DvcException(
72 "Weak ETags are not supported."
73 " (Etag: '{etag}', URL: '{url}')".format(etag=etag, url=url)
74 )
75
76 return etag
77
78 @wrap_prop(threading.Lock())
79 @cached_property
80 def _session(self):
81 import requests
82 from requests.adapters import HTTPAdapter
83 from urllib3.util.retry import Retry
84
85 session = requests.Session()
86
87 retries = Retry(
88 total=self.SESSION_RETRIES,
89 backoff_factor=self.SESSION_BACKOFF_FACTOR,
90 )
91
92 session.mount("http://", HTTPAdapter(max_retries=retries))
93 session.mount("https://", HTTPAdapter(max_retries=retries))
94
95 return session
96
97 def _request(self, method, url, **kwargs):
98 import requests
99
100 kwargs.setdefault("allow_redirects", True)
101 kwargs.setdefault("timeout", self.REQUEST_TIMEOUT)
102
103 try:
104 return self._session.request(method, url, **kwargs)
105 except requests.exceptions.RequestException:
106 raise DvcException("could not perform a {} request".format(method))
107
108 def gc(self):
109 raise NotImplementedError
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/remote/http.py b/dvc/remote/http.py
--- a/dvc/remote/http.py
+++ b/dvc/remote/http.py
@@ -101,7 +101,22 @@
kwargs.setdefault("timeout", self.REQUEST_TIMEOUT)
try:
- return self._session.request(method, url, **kwargs)
+ res = self._session.request(method, url, **kwargs)
+
+ redirect_no_location = (
+ kwargs["allow_redirects"]
+ and res.status_code in (301, 302)
+ and "location" not in res.headers
+ )
+
+ if redirect_no_location:
+ # AWS s3 doesn't like to add a location header to its redirects
+ # from https://s3.amazonaws.com/<bucket name>/* type URLs.
+ # This should be treated as an error
+ raise requests.exceptions.RequestException
+
+ return res
+
except requests.exceptions.RequestException:
raise DvcException("could not perform a {} request".format(method))
| {"golden_diff": "diff --git a/dvc/remote/http.py b/dvc/remote/http.py\n--- a/dvc/remote/http.py\n+++ b/dvc/remote/http.py\n@@ -101,7 +101,22 @@\n kwargs.setdefault(\"timeout\", self.REQUEST_TIMEOUT)\n \n try:\n- return self._session.request(method, url, **kwargs)\n+ res = self._session.request(method, url, **kwargs)\n+\n+ redirect_no_location = (\n+ kwargs[\"allow_redirects\"]\n+ and res.status_code in (301, 302)\n+ and \"location\" not in res.headers\n+ )\n+\n+ if redirect_no_location:\n+ # AWS s3 doesn't like to add a location header to its redirects\n+ # from https://s3.amazonaws.com/<bucket name>/* type URLs.\n+ # This should be treated as an error\n+ raise requests.exceptions.RequestException\n+\n+ return res\n+\n except requests.exceptions.RequestException:\n raise DvcException(\"could not perform a {} request\".format(method))\n", "issue": "`dvc pull` with wrong S3 remote failed but user wasn't informed\nSystem\r\n----------\r\n\r\nMy `dvc` version is 0.77.3, I installed & upgraded `dvc` through `pip3` and I'm using Fedora 31.\r\n\r\nProblem\r\n-----------\r\n\r\nI followed the 'Getting Started' guide and ran the following commands\r\n\r\n```\r\n$ dvc init\r\n$ git add .dvc/* && git commit -m 'Initialized dvc'\r\n$ dvc remote add -d s3remote https://s3.amazonaws.com/<bucket-name>\r\n$ dvc add <some-file>\r\n$ git add .gitignore <some-file>.dvc && git commit -m 'Added <some-file'\r\n$ dvc push -r s3remote\r\n```\r\nThe last command finished with `Everything is up to date.` which is weird since the S3 bucket was empty when checking it. I went on to delete `<some-file>` and run `dvc pull -r s3remote` which would restore `<some-file>` without throwing any errors. This was surprising since it did not get clear where `dvc` is storing my file backups and why my S3 bucket was still empty without `dvc` communicating any errors.\r\n\r\nOnly when I removed the `.dvc/cache` folder and ran `dvc pull -r s3remote` it would complain with the following:\r\n\r\n```\r\nERROR: failed to download 'https://s3.amazonaws.com/<bucket-name>/31/69f7ce4ebb503afca037d35b7eb3a9' to '.dvc/cache/31/69f7ce4ebb503afca037d35b7eb3a9' - '301 Moved Permanently'\r\n\r\nERROR: failed to download 'https://s3.amazonaws.com/<bucket-name>/a3/04afb96060aad90176268345e10355' to '.dvc/cache/a3/04afb96060aad90176268345e10355' - '301 Moved Permanently'\r\n\r\nERROR: failed to pull data from the cloud - 2 files failed to download\r\n\r\n\r\nHaving any troubles? Hit us up at https://dvc.org/support, we are always happy to help!\r\n```\r\n\r\nThe fix\r\n---------\r\n\r\nOn my end, it was an easy fix. After consulting the documentation in more detail, I realized that I set up my S3 remote incorrectly using the `https://s3.amazonaws.com/<bucket-name>` URL to the bucket whereas I should have used the `s3://<bucket-name>` URL. 
Hence, running\r\n\r\n```\r\n$ dvc remote modify s3remote url s3://<bucket-name>\r\n```\r\n\r\ndid the job.\r\n\r\nIt still remains weird why I was never warned about the remote being incorrectly set up and why `dvc pull -r s3remote` worked without any problems even though there were no files in the S3 bucket (it seems that it restored them from local cache as a fall-back) but it should have given me a headsup!\n", "before_files": [{"content": "import logging\nimport threading\n\nfrom funcy import cached_property, wrap_prop\n\nfrom dvc.config import Config\nfrom dvc.config import ConfigError\nfrom dvc.exceptions import DvcException, HTTPError\nfrom dvc.progress import Tqdm\nfrom dvc.remote.base import RemoteBASE\nfrom dvc.scheme import Schemes\n\nlogger = logging.getLogger(__name__)\n\n\nclass RemoteHTTP(RemoteBASE):\n scheme = Schemes.HTTP\n SESSION_RETRIES = 5\n SESSION_BACKOFF_FACTOR = 0.1\n REQUEST_TIMEOUT = 10\n CHUNK_SIZE = 2 ** 16\n PARAM_CHECKSUM = \"etag\"\n\n def __init__(self, repo, config):\n super().__init__(repo, config)\n\n url = config.get(Config.SECTION_REMOTE_URL)\n self.path_info = self.path_cls(url) if url else None\n\n if not self.no_traverse:\n raise ConfigError(\n \"HTTP doesn't support traversing the remote to list existing \"\n \"files. Use: `dvc remote modify <name> no_traverse true`\"\n )\n\n def _download(self, from_info, to_file, name=None, no_progress_bar=False):\n response = self._request(\"GET\", from_info.url, stream=True)\n if response.status_code != 200:\n raise HTTPError(response.status_code, response.reason)\n with Tqdm(\n total=None if no_progress_bar else self._content_length(response),\n leave=False,\n bytes=True,\n desc=from_info.url if name is None else name,\n disable=no_progress_bar,\n ) as pbar:\n with open(to_file, \"wb\") as fd:\n for chunk in response.iter_content(chunk_size=self.CHUNK_SIZE):\n fd.write(chunk)\n pbar.update(len(chunk))\n\n def exists(self, path_info):\n return bool(self._request(\"HEAD\", path_info.url))\n\n def _content_length(self, response):\n res = response.headers.get(\"Content-Length\")\n return int(res) if res else None\n\n def get_file_checksum(self, path_info):\n url = path_info.url\n headers = self._request(\"HEAD\", url).headers\n etag = headers.get(\"ETag\") or headers.get(\"Content-MD5\")\n\n if not etag:\n raise DvcException(\n \"could not find an ETag or \"\n \"Content-MD5 header for '{url}'\".format(url=url)\n )\n\n if etag.startswith(\"W/\"):\n raise DvcException(\n \"Weak ETags are not supported.\"\n \" (Etag: '{etag}', URL: '{url}')\".format(etag=etag, url=url)\n )\n\n return etag\n\n @wrap_prop(threading.Lock())\n @cached_property\n def _session(self):\n import requests\n from requests.adapters import HTTPAdapter\n from urllib3.util.retry import Retry\n\n session = requests.Session()\n\n retries = Retry(\n total=self.SESSION_RETRIES,\n backoff_factor=self.SESSION_BACKOFF_FACTOR,\n )\n\n session.mount(\"http://\", HTTPAdapter(max_retries=retries))\n session.mount(\"https://\", HTTPAdapter(max_retries=retries))\n\n return session\n\n def _request(self, method, url, **kwargs):\n import requests\n\n kwargs.setdefault(\"allow_redirects\", True)\n kwargs.setdefault(\"timeout\", self.REQUEST_TIMEOUT)\n\n try:\n return self._session.request(method, url, **kwargs)\n except requests.exceptions.RequestException:\n raise DvcException(\"could not perform a {} request\".format(method))\n\n def gc(self):\n raise NotImplementedError\n", "path": "dvc/remote/http.py"}], "after_files": [{"content": "import logging\nimport 
threading\n\nfrom funcy import cached_property, wrap_prop\n\nfrom dvc.config import Config\nfrom dvc.config import ConfigError\nfrom dvc.exceptions import DvcException, HTTPError\nfrom dvc.progress import Tqdm\nfrom dvc.remote.base import RemoteBASE\nfrom dvc.scheme import Schemes\n\nlogger = logging.getLogger(__name__)\n\n\nclass RemoteHTTP(RemoteBASE):\n scheme = Schemes.HTTP\n SESSION_RETRIES = 5\n SESSION_BACKOFF_FACTOR = 0.1\n REQUEST_TIMEOUT = 10\n CHUNK_SIZE = 2 ** 16\n PARAM_CHECKSUM = \"etag\"\n\n def __init__(self, repo, config):\n super().__init__(repo, config)\n\n url = config.get(Config.SECTION_REMOTE_URL)\n self.path_info = self.path_cls(url) if url else None\n\n if not self.no_traverse:\n raise ConfigError(\n \"HTTP doesn't support traversing the remote to list existing \"\n \"files. Use: `dvc remote modify <name> no_traverse true`\"\n )\n\n def _download(self, from_info, to_file, name=None, no_progress_bar=False):\n response = self._request(\"GET\", from_info.url, stream=True)\n if response.status_code != 200:\n raise HTTPError(response.status_code, response.reason)\n with Tqdm(\n total=None if no_progress_bar else self._content_length(response),\n leave=False,\n bytes=True,\n desc=from_info.url if name is None else name,\n disable=no_progress_bar,\n ) as pbar:\n with open(to_file, \"wb\") as fd:\n for chunk in response.iter_content(chunk_size=self.CHUNK_SIZE):\n fd.write(chunk)\n pbar.update(len(chunk))\n\n def exists(self, path_info):\n return bool(self._request(\"HEAD\", path_info.url))\n\n def _content_length(self, response):\n res = response.headers.get(\"Content-Length\")\n return int(res) if res else None\n\n def get_file_checksum(self, path_info):\n url = path_info.url\n headers = self._request(\"HEAD\", url).headers\n etag = headers.get(\"ETag\") or headers.get(\"Content-MD5\")\n\n if not etag:\n raise DvcException(\n \"could not find an ETag or \"\n \"Content-MD5 header for '{url}'\".format(url=url)\n )\n\n if etag.startswith(\"W/\"):\n raise DvcException(\n \"Weak ETags are not supported.\"\n \" (Etag: '{etag}', URL: '{url}')\".format(etag=etag, url=url)\n )\n\n return etag\n\n @wrap_prop(threading.Lock())\n @cached_property\n def _session(self):\n import requests\n from requests.adapters import HTTPAdapter\n from urllib3.util.retry import Retry\n\n session = requests.Session()\n\n retries = Retry(\n total=self.SESSION_RETRIES,\n backoff_factor=self.SESSION_BACKOFF_FACTOR,\n )\n\n session.mount(\"http://\", HTTPAdapter(max_retries=retries))\n session.mount(\"https://\", HTTPAdapter(max_retries=retries))\n\n return session\n\n def _request(self, method, url, **kwargs):\n import requests\n\n kwargs.setdefault(\"allow_redirects\", True)\n kwargs.setdefault(\"timeout\", self.REQUEST_TIMEOUT)\n\n try:\n res = self._session.request(method, url, **kwargs)\n\n redirect_no_location = (\n kwargs[\"allow_redirects\"]\n and res.status_code in (301, 302)\n and \"location\" not in res.headers\n )\n\n if redirect_no_location:\n # AWS s3 doesn't like to add a location header to its redirects\n # from https://s3.amazonaws.com/<bucket name>/* type URLs.\n # This should be treated as an error\n raise requests.exceptions.RequestException\n\n return res\n\n except requests.exceptions.RequestException:\n raise DvcException(\"could not perform a {} request\".format(method))\n\n def gc(self):\n raise NotImplementedError\n", "path": "dvc/remote/http.py"}]} | 1,946 | 234 |
gh_patches_debug_22926 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-236 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No Import BMV
Hi!
Thanks for the plugin, but unfortunately I can't get any data in. I checked, and it may be that the street names are different. Thank you
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py`
Content:
```
1 import logging
2 from html.parser import HTMLParser
3
4 import requests
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6 from waste_collection_schedule.service.ICS import ICS
7
8 TITLE = "BMV.at"
9 DESCRIPTION = "Source for BMV, Austria"
10 URL = "https://www.bmv.at"
11 TEST_CASES = {
12 "Allersdorf": {"ort": "ALLERSDORF", "strasse": "HAUSNUMMER", "hausnummer": 9},
13 "Bad Sauerbrunn": {
14 "ort": "BAD SAUERBRUNN",
15 "strasse": "BUCHINGERWEG",
16 "hausnummer": 16,
17 },
18 }
19
20 _LOGGER = logging.getLogger(__name__)
21
22
23 # Parser for HTML input (hidden) text
24 class HiddenInputParser(HTMLParser):
25 def __init__(self):
26 super().__init__()
27 self._args = {}
28
29 @property
30 def args(self):
31 return self._args
32
33 def handle_starttag(self, tag, attrs):
34 if tag == "input":
35 d = dict(attrs)
36 if d["type"] == "HIDDEN":
37 self._args[d["name"]] = d.get("value")
38
39
40 class Source:
41 def __init__(self, ort, strasse, hausnummer):
42 self._ort = ort
43 self._strasse = strasse
44 self._hausnummer = hausnummer
45 self._ics = ICS()
46
47 def fetch(self):
48 session = requests.session()
49
50 r = session.get(
51 "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet?SubmitAction=wasteDisposalServices&InFrameMode=TRUE"
52 )
53
54 # add all hidden input fields to form data
55 p = HiddenInputParser()
56 p.feed(r.text)
57 args = p.args
58
59 args["Focus"] = "Hausnummer"
60 args["SubmitAction"] = "forward"
61 args["Ort"] = self._ort
62 args["Strasse"] = self._strasse
63 args["Hausnummer"] = self._hausnummer
64 r = session.post(
65 "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
66 )
67
68 args["ApplicationName"] = "com.athos.kd.udb.AbfuhrTerminModel"
69 args["Focus"] = None
70 args["IsLastPage"] = "true"
71 args["Method"] = "POST"
72 args["PageName"] = "Terminliste"
73 args["SubmitAction"] = "filedownload_ICAL"
74 del args["Ort"]
75 del args["Strasse"]
76 del args["Hausnummer"]
77 r = session.post(
78 "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
79 )
80
81 dates = self._ics.convert(r.text)
82
83 entries = []
84 for d in dates:
85 entries.append(Collection(d[0], d[1]))
86 return entries
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py
@@ -15,6 +15,11 @@
"strasse": "BUCHINGERWEG",
"hausnummer": 16,
},
+ "Rattersdorf": {
+ "ort": "RATTERSDORF",
+ "strasse": "SIEBENBRÜNDLGASSE",
+ "hausnummer": 30,
+ },
}
_LOGGER = logging.getLogger(__name__)
@@ -56,6 +61,24 @@
p.feed(r.text)
args = p.args
+ args["Focus"] = "Ort"
+ args["SubmitAction"] = "changedEvent"
+ args["Ort"] = self._ort
+ args["Strasse"] = "HAUSNUMMER"
+ args["Hausnummer"] = 0
+ r = session.post(
+ "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
+ )
+
+ args["Focus"] = "Strasse"
+ args["SubmitAction"] = "changedEvent"
+ args["Ort"] = self._ort
+ args["Strasse"] = self._strasse
+ args["Hausnummer"] = 0
+ r = session.post(
+ "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
+ )
+
args["Focus"] = "Hausnummer"
args["SubmitAction"] = "forward"
args["Ort"] = self._ort
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py\n@@ -15,6 +15,11 @@\n \"strasse\": \"BUCHINGERWEG\",\n \"hausnummer\": 16,\n },\n+ \"Rattersdorf\": {\n+ \"ort\": \"RATTERSDORF\",\n+ \"strasse\": \"SIEBENBR\u00dcNDLGASSE\",\n+ \"hausnummer\": 30,\n+ },\n }\n \n _LOGGER = logging.getLogger(__name__)\n@@ -56,6 +61,24 @@\n p.feed(r.text)\n args = p.args\n \n+ args[\"Focus\"] = \"Ort\"\n+ args[\"SubmitAction\"] = \"changedEvent\"\n+ args[\"Ort\"] = self._ort\n+ args[\"Strasse\"] = \"HAUSNUMMER\"\n+ args[\"Hausnummer\"] = 0\n+ r = session.post(\n+ \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n+ )\n+\n+ args[\"Focus\"] = \"Strasse\"\n+ args[\"SubmitAction\"] = \"changedEvent\"\n+ args[\"Ort\"] = self._ort\n+ args[\"Strasse\"] = self._strasse\n+ args[\"Hausnummer\"] = 0\n+ r = session.post(\n+ \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n+ )\n+\n args[\"Focus\"] = \"Hausnummer\"\n args[\"SubmitAction\"] = \"forward\"\n args[\"Ort\"] = self._ort\n", "issue": "No Import BMV\nHi !\r\nThanks for the plugin, but unfortunately I can't get any data in. I checked, it may be that the street and the street are different. Thank you\n", "before_files": [{"content": "import logging\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"BMV.at\"\nDESCRIPTION = \"Source for BMV, Austria\"\nURL = \"https://www.bmv.at\"\nTEST_CASES = {\n \"Allersdorf\": {\"ort\": \"ALLERSDORF\", \"strasse\": \"HAUSNUMMER\", \"hausnummer\": 9},\n \"Bad Sauerbrunn\": {\n \"ort\": \"BAD SAUERBRUNN\",\n \"strasse\": \"BUCHINGERWEG\",\n \"hausnummer\": 16,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# Parser for HTML input (hidden) text\nclass HiddenInputParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._args = {}\n\n @property\n def args(self):\n return self._args\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n if d[\"type\"] == \"HIDDEN\":\n self._args[d[\"name\"]] = d.get(\"value\")\n\n\nclass Source:\n def __init__(self, ort, strasse, hausnummer):\n self._ort = ort\n self._strasse = strasse\n self._hausnummer = hausnummer\n self._ics = ICS()\n\n def fetch(self):\n session = requests.session()\n\n r = session.get(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet?SubmitAction=wasteDisposalServices&InFrameMode=TRUE\"\n )\n\n # add all hidden input fields to form data\n p = HiddenInputParser()\n p.feed(r.text)\n args = p.args\n\n args[\"Focus\"] = \"Hausnummer\"\n args[\"SubmitAction\"] = \"forward\"\n args[\"Ort\"] = self._ort\n args[\"Strasse\"] = self._strasse\n args[\"Hausnummer\"] = self._hausnummer\n r = session.post(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n )\n\n args[\"ApplicationName\"] = \"com.athos.kd.udb.AbfuhrTerminModel\"\n args[\"Focus\"] = None\n args[\"IsLastPage\"] = \"true\"\n args[\"Method\"] = \"POST\"\n args[\"PageName\"] = \"Terminliste\"\n args[\"SubmitAction\"] = \"filedownload_ICAL\"\n del args[\"Ort\"]\n del args[\"Strasse\"]\n 
del args[\"Hausnummer\"]\n r = session.post(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n )\n\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py"}], "after_files": [{"content": "import logging\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"BMV.at\"\nDESCRIPTION = \"Source for BMV, Austria\"\nURL = \"https://www.bmv.at\"\nTEST_CASES = {\n \"Allersdorf\": {\"ort\": \"ALLERSDORF\", \"strasse\": \"HAUSNUMMER\", \"hausnummer\": 9},\n \"Bad Sauerbrunn\": {\n \"ort\": \"BAD SAUERBRUNN\",\n \"strasse\": \"BUCHINGERWEG\",\n \"hausnummer\": 16,\n },\n \"Rattersdorf\": {\n \"ort\": \"RATTERSDORF\",\n \"strasse\": \"SIEBENBR\u00dcNDLGASSE\",\n \"hausnummer\": 30,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# Parser for HTML input (hidden) text\nclass HiddenInputParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._args = {}\n\n @property\n def args(self):\n return self._args\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n if d[\"type\"] == \"HIDDEN\":\n self._args[d[\"name\"]] = d.get(\"value\")\n\n\nclass Source:\n def __init__(self, ort, strasse, hausnummer):\n self._ort = ort\n self._strasse = strasse\n self._hausnummer = hausnummer\n self._ics = ICS()\n\n def fetch(self):\n session = requests.session()\n\n r = session.get(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet?SubmitAction=wasteDisposalServices&InFrameMode=TRUE\"\n )\n\n # add all hidden input fields to form data\n p = HiddenInputParser()\n p.feed(r.text)\n args = p.args\n\n args[\"Focus\"] = \"Ort\"\n args[\"SubmitAction\"] = \"changedEvent\"\n args[\"Ort\"] = self._ort\n args[\"Strasse\"] = \"HAUSNUMMER\"\n args[\"Hausnummer\"] = 0\n r = session.post(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n )\n\n args[\"Focus\"] = \"Strasse\"\n args[\"SubmitAction\"] = \"changedEvent\"\n args[\"Ort\"] = self._ort\n args[\"Strasse\"] = self._strasse\n args[\"Hausnummer\"] = 0\n r = session.post(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n )\n\n args[\"Focus\"] = \"Hausnummer\"\n args[\"SubmitAction\"] = \"forward\"\n args[\"Ort\"] = self._ort\n args[\"Strasse\"] = self._strasse\n args[\"Hausnummer\"] = self._hausnummer\n r = session.post(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n )\n\n args[\"ApplicationName\"] = \"com.athos.kd.udb.AbfuhrTerminModel\"\n args[\"Focus\"] = None\n args[\"IsLastPage\"] = \"true\"\n args[\"Method\"] = \"POST\"\n args[\"PageName\"] = \"Terminliste\"\n args[\"SubmitAction\"] = \"filedownload_ICAL\"\n del args[\"Ort\"]\n del args[\"Strasse\"]\n del args[\"Hausnummer\"]\n r = session.post(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n )\n\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py"}]} | 1,136 | 417 |
gh_patches_debug_32609 | rasdani/github-patches | git_diff | stephenmcd__mezzanine-1484 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError while importing blog posts from Blogger
Hi, when I run the Blogger importer:
```
$ python manage.py import_blogger --mezzanine-user=.. --blogger-id=XXX
```
A ValueError is raised:
``` python
Traceback (most recent call last):
File "manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/.../django/core/management/__init__.py", line 354, in execute_from_command_line
utility.execute()
File "/.../django/core/management/__init__.py", line 346, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/.../django/core/management/base.py", line 394, in run_from_argv
self.execute(*args, **cmd_options)
File "/.../django/core/management/base.py", line 445, in execute
output = self.handle(*args, **options)
File "/.../mezzanine/blog/management/base.py", line 168, in handle
self.handle_import(options)
File "/.../mezzanine/blog/management/commands/import_blogger.py", line 59, in handle_import
"%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
File "/.../python2.7/_strptime.py", line 325, in _strptime
(data_string, format))
ValueError: time data '2015-11-26T16:21:0' does not match format '%Y-%m-%dT%H:%M:%S.%f'
```
A possible way of fixing this is to change mezzanine/blog/management/commands/import_blogger.py
``` python
try:
published_date = datetime.strptime(entry.published.text[:-6],
"%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
except ValueError:
published_date = datetime.strptime(entry.published.text[:-6],
"%Y-%m-%dT%H:%M:%S") - timedelta(seconds=timezone)
```
and
``` python
try:
comment_date = datetime.strptime(comment.published.text[:-6],
"%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
except ValueError:
comment_date = datetime.strptime(comment.published.text[:-6],
"%Y-%m-%dT%H:%M:%S") - timedelta(seconds=timezone)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mezzanine/blog/management/commands/import_blogger.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from datetime import datetime, timedelta
4 from optparse import make_option
5 from time import timezone
6 import re
7
8 from django.core.management.base import CommandError
9
10 from mezzanine.blog.management.base import BaseImporterCommand
11
12
13 # TODO: update this to use v3 of the blogger API.
14 class Command(BaseImporterCommand):
15 """
16 Implements a Blogger importer. Takes a Blogger ID in order to be able to
17 determine which blog it should point to and harvest the XML from.
18 """
19
20 option_list = BaseImporterCommand.option_list + (
21 make_option("-b", "--blogger-id", dest="blog_id",
22 help="Blogger Blog ID from blogger dashboard"),
23 )
24
25 def handle_import(self, options):
26 """
27 Gets posts from Blogger.
28 """
29
30 blog_id = options.get("blog_id")
31 if blog_id is None:
32 raise CommandError("Usage is import_blogger %s" % self.args)
33
34 try:
35 from gdata import service
36 except ImportError:
37 raise CommandError("Could not import the gdata library.")
38
39 blogger = service.GDataService()
40 blogger.service = "blogger"
41 blogger.server = "www.blogger.com"
42
43 start_index = 1
44 processed_posts = []
45 new_posts = 1
46
47 while new_posts:
48 new_posts = 0
49
50 query = service.Query()
51 query.feed = "/feeds/%s/posts/full" % blog_id
52 query.max_results = 500
53 query.start_index = start_index
54
55 try:
56 feed = blogger.Get(query.ToUri())
57 except service.RequestError as err:
58 message = "There was a service error. The response was: " \
59 "%(status)s %(reason)s - %(body)s" % err.message
60 raise CommandError(message, blogger.server + query.feed,
61 err.message["status"])
62
63 for (i, entry) in enumerate(feed.entry):
64 # this basically gets the unique post ID from the URL to itself
65 # and pulls the ID off the end.
66 post_id = entry.GetSelfLink().href.split("/")[-1]
67
68 # Skip duplicate posts. Important for the last query.
69 if post_id in processed_posts:
70 continue
71
72 title = entry.title.text
73 content = entry.content.text
74 # this strips off the time zone info off the end as we want UTC
75 clean_date = entry.published.text[:re.search(r"\.\d{3}",
76 entry.published.text).end()]
77 published_date = datetime.strptime(clean_date,
78 "%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
79
80 # TODO - issues with content not generating correct <P> tags
81
82 tags = [tag.term for tag in entry.category]
83 post = self.add_post(title=title, content=content,
84 pub_date=published_date, tags=tags)
85
86 # get the comments from the post feed and then add them to
87 # the post details
88 comment_url = "/feeds/%s/%s/comments/full?max-results=1000"
89 comments = blogger.Get(comment_url % (blog_id, post_id))
90
91 for comment in comments.entry:
92 email = comment.author[0].email.text
93 author_name = comment.author[0].name.text
94 # Strip off the time zone info off the end as we want UTC
95 clean_date = comment.published.text[:re.search(r"\.\d{3}",
96 comment.published.text).end()]
97 comment_date = datetime.strptime(clean_date,
98 "%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
99 website = ""
100 if comment.author[0].uri:
101 website = comment.author[0].uri.text
102 body = comment.content.text
103
104 # add the comment as a dict to the end of the comments list
105 self.add_comment(post=post, name=author_name, email=email,
106 body=body, website=website,
107 pub_date=comment_date)
108
109 processed_posts.append(post_id)
110 new_posts += 1
111
112 start_index += 500
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mezzanine/blog/management/commands/import_blogger.py b/mezzanine/blog/management/commands/import_blogger.py
--- a/mezzanine/blog/management/commands/import_blogger.py
+++ b/mezzanine/blog/management/commands/import_blogger.py
@@ -74,8 +74,8 @@
# this strips off the time zone info off the end as we want UTC
clean_date = entry.published.text[:re.search(r"\.\d{3}",
entry.published.text).end()]
- published_date = datetime.strptime(clean_date,
- "%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
+
+ published_date = self.parse_datetime(clean_date)
# TODO - issues with content not generating correct <P> tags
@@ -94,8 +94,9 @@
# Strip off the time zone info off the end as we want UTC
clean_date = comment.published.text[:re.search(r"\.\d{3}",
comment.published.text).end()]
- comment_date = datetime.strptime(clean_date,
- "%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
+
+ comment_date = self.parse_datetime(clean_date)
+
website = ""
if comment.author[0].uri:
website = comment.author[0].uri.text
@@ -110,3 +111,14 @@
new_posts += 1
start_index += 500
+
+ def parse_datetime(self, datetime_string):
+ try:
+ parsed_datetime = datetime.strptime(datetime_string,
+ "%Y-%m-%dT%H:%M:%S.%f")
+ except ValueError:
+ parsed_datetime = datetime.strptime(datetime_string,
+ "%Y-%m-%dT%H:%M:%S")
+
+ parsed_datetime -= timedelta(seconds=timezone)
+ return parsed_datetime
| {"golden_diff": "diff --git a/mezzanine/blog/management/commands/import_blogger.py b/mezzanine/blog/management/commands/import_blogger.py\n--- a/mezzanine/blog/management/commands/import_blogger.py\n+++ b/mezzanine/blog/management/commands/import_blogger.py\n@@ -74,8 +74,8 @@\n # this strips off the time zone info off the end as we want UTC\n clean_date = entry.published.text[:re.search(r\"\\.\\d{3}\",\n entry.published.text).end()]\n- published_date = datetime.strptime(clean_date,\n- \"%Y-%m-%dT%H:%M:%S.%f\") - timedelta(seconds=timezone)\n+\n+ published_date = self.parse_datetime(clean_date)\n \n # TODO - issues with content not generating correct <P> tags\n \n@@ -94,8 +94,9 @@\n # Strip off the time zone info off the end as we want UTC\n clean_date = comment.published.text[:re.search(r\"\\.\\d{3}\",\n comment.published.text).end()]\n- comment_date = datetime.strptime(clean_date,\n- \"%Y-%m-%dT%H:%M:%S.%f\") - timedelta(seconds=timezone)\n+\n+ comment_date = self.parse_datetime(clean_date)\n+\n website = \"\"\n if comment.author[0].uri:\n website = comment.author[0].uri.text\n@@ -110,3 +111,14 @@\n new_posts += 1\n \n start_index += 500\n+\n+ def parse_datetime(self, datetime_string):\n+ try:\n+ parsed_datetime = datetime.strptime(datetime_string,\n+ \"%Y-%m-%dT%H:%M:%S.%f\")\n+ except ValueError:\n+ parsed_datetime = datetime.strptime(datetime_string,\n+ \"%Y-%m-%dT%H:%M:%S\")\n+\n+ parsed_datetime -= timedelta(seconds=timezone)\n+ return parsed_datetime\n", "issue": "ValueError while import blog posts from Blogger \nHi, when I run the Blogger importer: \n\n```\n$ python manage.py import_blogger --mezzanine-user=.. --blogger-id=XXX\n```\n\nA ValueError is raised:\n\n``` python\nTraceback (most recent call last):\n File \"manage.py\", line 15, in <module>\n execute_from_command_line(sys.argv)\n File \"/.../django/core/management/__init__.py\", line 354, in execute_from_command_line\n utility.execute()\n File \"/.../django/core/management/__init__.py\", line 346, in execute\n self.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/.../django/core/management/base.py\", line 394, in run_from_argv\n self.execute(*args, **cmd_options)\n File \"/.../django/core/management/base.py\", line 445, in execute\n output = self.handle(*args, **options)\n File \"/.../mezzanine/blog/management/base.py\", line 168, in handle\n self.handle_import(options)\n File \"/.../mezzanine/blog/management/commands/import_blogger.py\", line 59, in handle_import\n \"%Y-%m-%dT%H:%M:%S.%f\") - timedelta(seconds=timezone)\n File \"/.../python2.7/_strptime.py\", line 325, in _strptime\n (data_string, format))\nValueError: time data '2015-11-26T16:21:0' does not match format '%Y-%m-%dT%H:%M:%S.%f'\n```\n\nA possible way of fixing this is to change mezzanine/blog/management/commands/import_blogger.py\n\n``` python\ntry:\n published_date = datetime.strptime(entry.published.text[:-6],\n \"%Y-%m-%dT%H:%M:%S.%f\") - timedelta(seconds=timezone)\nexcept ValueError:\n published_date = datetime.strptime(entry.published.text[:-6],\n \"%Y-%m-%dT%H:%M:%S\") - timedelta(seconds=timezone)\n```\n\nand\n\n``` python\ntry:\n comment_date = datetime.strptime(comment.published.text[:-6],\n \"%Y-%m-%dT%H:%M:%S.%f\") - timedelta(seconds=timezone)\nexcept ValueError:\n comment_date = datetime.strptime(comment.published.text[:-6],\n \"%Y-%m-%dT%H:%M:%S\") - timedelta(seconds=timezone)\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom datetime import datetime, timedelta\nfrom optparse import 
make_option\nfrom time import timezone\nimport re\n\nfrom django.core.management.base import CommandError\n\nfrom mezzanine.blog.management.base import BaseImporterCommand\n\n\n# TODO: update this to use v3 of the blogger API.\nclass Command(BaseImporterCommand):\n \"\"\"\n Implements a Blogger importer. Takes a Blogger ID in order to be able to\n determine which blog it should point to and harvest the XML from.\n \"\"\"\n\n option_list = BaseImporterCommand.option_list + (\n make_option(\"-b\", \"--blogger-id\", dest=\"blog_id\",\n help=\"Blogger Blog ID from blogger dashboard\"),\n )\n\n def handle_import(self, options):\n \"\"\"\n Gets posts from Blogger.\n \"\"\"\n\n blog_id = options.get(\"blog_id\")\n if blog_id is None:\n raise CommandError(\"Usage is import_blogger %s\" % self.args)\n\n try:\n from gdata import service\n except ImportError:\n raise CommandError(\"Could not import the gdata library.\")\n\n blogger = service.GDataService()\n blogger.service = \"blogger\"\n blogger.server = \"www.blogger.com\"\n\n start_index = 1\n processed_posts = []\n new_posts = 1\n\n while new_posts:\n new_posts = 0\n\n query = service.Query()\n query.feed = \"/feeds/%s/posts/full\" % blog_id\n query.max_results = 500\n query.start_index = start_index\n\n try:\n feed = blogger.Get(query.ToUri())\n except service.RequestError as err:\n message = \"There was a service error. The response was: \" \\\n \"%(status)s %(reason)s - %(body)s\" % err.message\n raise CommandError(message, blogger.server + query.feed,\n err.message[\"status\"])\n\n for (i, entry) in enumerate(feed.entry):\n # this basically gets the unique post ID from the URL to itself\n # and pulls the ID off the end.\n post_id = entry.GetSelfLink().href.split(\"/\")[-1]\n\n # Skip duplicate posts. 
Important for the last query.\n if post_id in processed_posts:\n continue\n\n title = entry.title.text\n content = entry.content.text\n # this strips off the time zone info off the end as we want UTC\n clean_date = entry.published.text[:re.search(r\"\\.\\d{3}\",\n entry.published.text).end()]\n published_date = datetime.strptime(clean_date,\n \"%Y-%m-%dT%H:%M:%S.%f\") - timedelta(seconds=timezone)\n\n # TODO - issues with content not generating correct <P> tags\n\n tags = [tag.term for tag in entry.category]\n post = self.add_post(title=title, content=content,\n pub_date=published_date, tags=tags)\n\n # get the comments from the post feed and then add them to\n # the post details\n comment_url = \"/feeds/%s/%s/comments/full?max-results=1000\"\n comments = blogger.Get(comment_url % (blog_id, post_id))\n\n for comment in comments.entry:\n email = comment.author[0].email.text\n author_name = comment.author[0].name.text\n # Strip off the time zone info off the end as we want UTC\n clean_date = comment.published.text[:re.search(r\"\\.\\d{3}\",\n comment.published.text).end()]\n comment_date = datetime.strptime(clean_date,\n \"%Y-%m-%dT%H:%M:%S.%f\") - timedelta(seconds=timezone)\n website = \"\"\n if comment.author[0].uri:\n website = comment.author[0].uri.text\n body = comment.content.text\n\n # add the comment as a dict to the end of the comments list\n self.add_comment(post=post, name=author_name, email=email,\n body=body, website=website,\n pub_date=comment_date)\n\n processed_posts.append(post_id)\n new_posts += 1\n\n start_index += 500\n", "path": "mezzanine/blog/management/commands/import_blogger.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom datetime import datetime, timedelta\nfrom optparse import make_option\nfrom time import timezone\nimport re\n\nfrom django.core.management.base import CommandError\n\nfrom mezzanine.blog.management.base import BaseImporterCommand\n\n\n# TODO: update this to use v3 of the blogger API.\nclass Command(BaseImporterCommand):\n \"\"\"\n Implements a Blogger importer. Takes a Blogger ID in order to be able to\n determine which blog it should point to and harvest the XML from.\n \"\"\"\n\n option_list = BaseImporterCommand.option_list + (\n make_option(\"-b\", \"--blogger-id\", dest=\"blog_id\",\n help=\"Blogger Blog ID from blogger dashboard\"),\n )\n\n def handle_import(self, options):\n \"\"\"\n Gets posts from Blogger.\n \"\"\"\n\n blog_id = options.get(\"blog_id\")\n if blog_id is None:\n raise CommandError(\"Usage is import_blogger %s\" % self.args)\n\n try:\n from gdata import service\n except ImportError:\n raise CommandError(\"Could not import the gdata library.\")\n\n blogger = service.GDataService()\n blogger.service = \"blogger\"\n blogger.server = \"www.blogger.com\"\n\n start_index = 1\n processed_posts = []\n new_posts = 1\n\n while new_posts:\n new_posts = 0\n\n query = service.Query()\n query.feed = \"/feeds/%s/posts/full\" % blog_id\n query.max_results = 500\n query.start_index = start_index\n\n try:\n feed = blogger.Get(query.ToUri())\n except service.RequestError as err:\n message = \"There was a service error. The response was: \" \\\n \"%(status)s %(reason)s - %(body)s\" % err.message\n raise CommandError(message, blogger.server + query.feed,\n err.message[\"status\"])\n\n for (i, entry) in enumerate(feed.entry):\n # this basically gets the unique post ID from the URL to itself\n # and pulls the ID off the end.\n post_id = entry.GetSelfLink().href.split(\"/\")[-1]\n\n # Skip duplicate posts. 
Important for the last query.\n if post_id in processed_posts:\n continue\n\n title = entry.title.text\n content = entry.content.text\n # this strips off the time zone info off the end as we want UTC\n clean_date = entry.published.text[:re.search(r\"\\.\\d{3}\",\n entry.published.text).end()]\n\n published_date = self.parse_datetime(clean_date)\n\n # TODO - issues with content not generating correct <P> tags\n\n tags = [tag.term for tag in entry.category]\n post = self.add_post(title=title, content=content,\n pub_date=published_date, tags=tags)\n\n # get the comments from the post feed and then add them to\n # the post details\n comment_url = \"/feeds/%s/%s/comments/full?max-results=1000\"\n comments = blogger.Get(comment_url % (blog_id, post_id))\n\n for comment in comments.entry:\n email = comment.author[0].email.text\n author_name = comment.author[0].name.text\n # Strip off the time zone info off the end as we want UTC\n clean_date = comment.published.text[:re.search(r\"\\.\\d{3}\",\n comment.published.text).end()]\n\n comment_date = self.parse_datetime(clean_date)\n\n website = \"\"\n if comment.author[0].uri:\n website = comment.author[0].uri.text\n body = comment.content.text\n\n # add the comment as a dict to the end of the comments list\n self.add_comment(post=post, name=author_name, email=email,\n body=body, website=website,\n pub_date=comment_date)\n\n processed_posts.append(post_id)\n new_posts += 1\n\n start_index += 500\n\n def parse_datetime(self, datetime_string):\n try:\n parsed_datetime = datetime.strptime(datetime_string,\n \"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n parsed_datetime = datetime.strptime(datetime_string,\n \"%Y-%m-%dT%H:%M:%S\")\n\n parsed_datetime -= timedelta(seconds=timezone)\n return parsed_datetime\n", "path": "mezzanine/blog/management/commands/import_blogger.py"}]} | 1,921 | 412 |
gh_patches_debug_21680 | rasdani/github-patches | git_diff | conan-io__conan-2943 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistency between local and remote version of `conan search`
Depending on whether we search in remotes or locally, we get different results in situations where we don't use wildcards.
Example:
```
$ conan search zlib
There are no packages matching the 'zlib' pattern
$ conan search zlib*
Existing package recipes:
zlib/1.2.8@conan/stable
zlib/1.2.11@conan/stable
```
```
$ conan search zlib -r conan-center
Existing package recipes:
zlib/1.2.8@conan/stable
zlib/1.2.11@conan/stable
zlib/1.2.11@conan/testing
```
The same happens for combinations such as `zlib/1.2.8`, `zlib/1.2.8@`, `zlib/1.2.8@conan`, `zlib/1.2.8@conan/`, except for `zlib/`.
Proposition: make local search act in the same manner as remote search.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/search/search.py`
Content:
```
1 import re
2 import os
3
4
5 from fnmatch import translate
6
7 from conans.errors import ConanException, NotFoundException
8 from conans.model.info import ConanInfo
9 from conans.model.ref import PackageReference, ConanFileReference
10 from conans.paths import CONANINFO
11 from conans.util.log import logger
12 from conans.search.query_parse import infix_to_postfix, evaluate_postfix
13 from conans.util.files import list_folder_subdirs, load
14
15
16 def filter_outdated(packages_infos, recipe_hash):
17 result = {}
18 for package_id, info in packages_infos.items():
19 try: # Existing package_info of old package might not have recipe_hash
20 if info["recipe_hash"] != recipe_hash:
21 result[package_id] = info
22 except KeyError:
23 pass
24 return result
25
26
27 def filter_packages(query, package_infos):
28 if query is None:
29 return package_infos
30 try:
31 if "!" in query:
32 raise ConanException("'!' character is not allowed")
33 if " not " in query or query.startswith("not "):
34 raise ConanException("'not' operator is not allowed")
35 postfix = infix_to_postfix(query) if query else []
36 result = {}
37 for package_id, info in package_infos.items():
38 if evaluate_postfix_with_info(postfix, info):
39 result[package_id] = info
40 return result
41 except Exception as exc:
42 raise ConanException("Invalid package query: %s. %s" % (query, exc))
43
44
45 def evaluate_postfix_with_info(postfix, conan_vars_info):
46
47 # Evaluate conaninfo with the expression
48
49 def evaluate_info(expression):
50 """Receives an expression like compiler.version="12"
51 Uses conan_vars_info in the closure to evaluate it"""
52 name, value = expression.split("=", 1)
53 value = value.replace("\"", "")
54 return evaluate(name, value, conan_vars_info)
55
56 return evaluate_postfix(postfix, evaluate_info)
57
58
59 def evaluate(prop_name, prop_value, conan_vars_info):
60 """
61 Evaluates a single prop_name, prop_value like "os", "Windows" against conan_vars_info.serialize_min()
62 """
63
64 def compatible_prop(setting_value, prop_value):
65 return setting_value is None or prop_value == setting_value
66
67 info_settings = conan_vars_info.get("settings", [])
68 info_options = conan_vars_info.get("options", [])
69
70 if prop_name in ["os", "compiler", "arch", "build_type"] or prop_name.startswith("compiler."):
71 return compatible_prop(info_settings.get(prop_name, None), prop_value)
72 else:
73 return compatible_prop(info_options.get(prop_name, None), prop_value)
74 return False
75
76
77 def search_recipes(paths, pattern=None, ignorecase=True):
78 # Conan references in main storage
79 if pattern:
80 if isinstance(pattern, ConanFileReference):
81 pattern = str(pattern)
82 pattern = translate(pattern)
83 pattern = re.compile(pattern, re.IGNORECASE) if ignorecase else re.compile(pattern)
84
85 subdirs = list_folder_subdirs(basedir=paths.store, level=4)
86 if not pattern:
87 return sorted([ConanFileReference(*folder.split("/")) for folder in subdirs])
88 else:
89 ret = []
90 for subdir in subdirs:
91 conan_ref = ConanFileReference(*subdir.split("/"))
92 if pattern:
93 if pattern.match(str(conan_ref)):
94 ret.append(conan_ref)
95 return sorted(ret)
96
97
98 def search_packages(paths, reference, query):
99 """ Return a dict like this:
100
101 {package_ID: {name: "OpenCV",
102 version: "2.14",
103 settings: {os: Windows}}}
104 param conan_ref: ConanFileReference object
105 """
106 infos = _get_local_infos_min(paths, reference)
107 return filter_packages(query, infos)
108
109
110 def _get_local_infos_min(paths, reference):
111 result = {}
112 packages_path = paths.packages(reference)
113 subdirs = list_folder_subdirs(packages_path, level=1)
114 for package_id in subdirs:
115 # Read conaninfo
116 try:
117 package_reference = PackageReference(reference, package_id)
118 info_path = os.path.join(paths.package(package_reference,
119 short_paths=None), CONANINFO)
120 if not os.path.exists(info_path):
121 raise NotFoundException("")
122 conan_info_content = load(info_path)
123 conan_vars_info = ConanInfo.loads(conan_info_content).serialize_min()
124 result[package_id] = conan_vars_info
125
126 except Exception as exc:
127 logger.error("Package %s has no ConanInfo file" % str(package_reference))
128 if str(exc):
129 logger.error(str(exc))
130
131 return result
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/search/search.py b/conans/search/search.py
--- a/conans/search/search.py
+++ b/conans/search/search.py
@@ -1,7 +1,6 @@
import re
import os
-
from fnmatch import translate
from conans.errors import ConanException, NotFoundException
@@ -90,11 +89,27 @@
for subdir in subdirs:
conan_ref = ConanFileReference(*subdir.split("/"))
if pattern:
- if pattern.match(str(conan_ref)):
+ if _partial_match(pattern, conan_ref):
ret.append(conan_ref)
+
return sorted(ret)
+def _partial_match(pattern, conan_ref):
+ """
+ Finds if pattern matches any of partial sums of tokens of conan reference
+ """
+
+ tokens = str(conan_ref).replace('/', ' / ').replace('@', ' @ ').split()
+
+ def partial_sums(iterable):
+ sum = ''
+ for i in iterable:
+ sum += i
+ yield sum
+
+ return any(map(pattern.match, list(partial_sums(tokens))))
+
def search_packages(paths, reference, query):
""" Return a dict like this:
| {"golden_diff": "diff --git a/conans/search/search.py b/conans/search/search.py\n--- a/conans/search/search.py\n+++ b/conans/search/search.py\n@@ -1,7 +1,6 @@\n import re\n import os\n \n-\n from fnmatch import translate\n \n from conans.errors import ConanException, NotFoundException\n@@ -90,11 +89,27 @@\n for subdir in subdirs:\n conan_ref = ConanFileReference(*subdir.split(\"/\"))\n if pattern:\n- if pattern.match(str(conan_ref)):\n+ if _partial_match(pattern, conan_ref):\n ret.append(conan_ref)\n+\n return sorted(ret)\n \n \n+def _partial_match(pattern, conan_ref):\n+ \"\"\"\n+ Finds if pattern matches any of partial sums of tokens of conan reference\n+ \"\"\"\n+ \n+ tokens = str(conan_ref).replace('/', ' / ').replace('@', ' @ ').split()\n+\n+ def partial_sums(iterable):\n+ sum = ''\n+ for i in iterable:\n+ sum += i\n+ yield sum\n+\n+ return any(map(pattern.match, list(partial_sums(tokens))))\n+\n def search_packages(paths, reference, query):\n \"\"\" Return a dict like this:\n", "issue": "Inconsistency between local and remote version of `conan search`\nDepending on searching either in remotes or locally we're getting different results for situations where we don't use wildcards. \r\nExample:\r\n```\r\n$ conan search zlib\r\nThere are no packages matching the 'zlib' pattern\r\n\r\n$ conan search zlib*\r\nExisting package recipes:\r\n\r\nzlib/1.2.8@conan/stable\r\nzlib/1.2.11@conan/stable\r\n```\r\n```\r\n$ conan search zlib -r conan-center\r\nExisting package recipes:\r\n\r\nzlib/1.2.8@conan/stable\r\nzlib/1.2.11@conan/stable\r\nzlib/1.2.11@conan/testing\r\n```\r\nSame for combinations such as `zlib/1.2.8`, `zlib/1.2.8@`, `zlib/1.2.8@conan`, `zlib/1.2.8@conan/` except for `zlib/`.\r\n\r\nProposition: make local search act in the same manner as remote search.\n", "before_files": [{"content": "import re\nimport os\n\n\nfrom fnmatch import translate\n\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.model.info import ConanInfo\nfrom conans.model.ref import PackageReference, ConanFileReference\nfrom conans.paths import CONANINFO\nfrom conans.util.log import logger\nfrom conans.search.query_parse import infix_to_postfix, evaluate_postfix\nfrom conans.util.files import list_folder_subdirs, load\n\n\ndef filter_outdated(packages_infos, recipe_hash):\n result = {}\n for package_id, info in packages_infos.items():\n try: # Existing package_info of old package might not have recipe_hash\n if info[\"recipe_hash\"] != recipe_hash:\n result[package_id] = info\n except KeyError:\n pass\n return result\n\n\ndef filter_packages(query, package_infos):\n if query is None:\n return package_infos\n try:\n if \"!\" in query:\n raise ConanException(\"'!' character is not allowed\")\n if \" not \" in query or query.startswith(\"not \"):\n raise ConanException(\"'not' operator is not allowed\")\n postfix = infix_to_postfix(query) if query else []\n result = {}\n for package_id, info in package_infos.items():\n if evaluate_postfix_with_info(postfix, info):\n result[package_id] = info\n return result\n except Exception as exc:\n raise ConanException(\"Invalid package query: %s. 
%s\" % (query, exc))\n\n\ndef evaluate_postfix_with_info(postfix, conan_vars_info):\n\n # Evaluate conaninfo with the expression\n\n def evaluate_info(expression):\n \"\"\"Receives an expression like compiler.version=\"12\"\n Uses conan_vars_info in the closure to evaluate it\"\"\"\n name, value = expression.split(\"=\", 1)\n value = value.replace(\"\\\"\", \"\")\n return evaluate(name, value, conan_vars_info)\n\n return evaluate_postfix(postfix, evaluate_info)\n\n\ndef evaluate(prop_name, prop_value, conan_vars_info):\n \"\"\"\n Evaluates a single prop_name, prop_value like \"os\", \"Windows\" against conan_vars_info.serialize_min()\n \"\"\"\n\n def compatible_prop(setting_value, prop_value):\n return setting_value is None or prop_value == setting_value\n\n info_settings = conan_vars_info.get(\"settings\", [])\n info_options = conan_vars_info.get(\"options\", [])\n\n if prop_name in [\"os\", \"compiler\", \"arch\", \"build_type\"] or prop_name.startswith(\"compiler.\"):\n return compatible_prop(info_settings.get(prop_name, None), prop_value)\n else:\n return compatible_prop(info_options.get(prop_name, None), prop_value)\n return False\n\n\ndef search_recipes(paths, pattern=None, ignorecase=True):\n # Conan references in main storage\n if pattern:\n if isinstance(pattern, ConanFileReference):\n pattern = str(pattern)\n pattern = translate(pattern)\n pattern = re.compile(pattern, re.IGNORECASE) if ignorecase else re.compile(pattern)\n\n subdirs = list_folder_subdirs(basedir=paths.store, level=4)\n if not pattern:\n return sorted([ConanFileReference(*folder.split(\"/\")) for folder in subdirs])\n else:\n ret = []\n for subdir in subdirs:\n conan_ref = ConanFileReference(*subdir.split(\"/\"))\n if pattern:\n if pattern.match(str(conan_ref)):\n ret.append(conan_ref)\n return sorted(ret)\n\n\ndef search_packages(paths, reference, query):\n \"\"\" Return a dict like this:\n\n {package_ID: {name: \"OpenCV\",\n version: \"2.14\",\n settings: {os: Windows}}}\n param conan_ref: ConanFileReference object\n \"\"\"\n infos = _get_local_infos_min(paths, reference)\n return filter_packages(query, infos)\n\n\ndef _get_local_infos_min(paths, reference):\n result = {}\n packages_path = paths.packages(reference)\n subdirs = list_folder_subdirs(packages_path, level=1)\n for package_id in subdirs:\n # Read conaninfo\n try:\n package_reference = PackageReference(reference, package_id)\n info_path = os.path.join(paths.package(package_reference,\n short_paths=None), CONANINFO)\n if not os.path.exists(info_path):\n raise NotFoundException(\"\")\n conan_info_content = load(info_path)\n conan_vars_info = ConanInfo.loads(conan_info_content).serialize_min()\n result[package_id] = conan_vars_info\n\n except Exception as exc:\n logger.error(\"Package %s has no ConanInfo file\" % str(package_reference))\n if str(exc):\n logger.error(str(exc))\n\n return result\n", "path": "conans/search/search.py"}], "after_files": [{"content": "import re\nimport os\n\nfrom fnmatch import translate\n\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.model.info import ConanInfo\nfrom conans.model.ref import PackageReference, ConanFileReference\nfrom conans.paths import CONANINFO\nfrom conans.util.log import logger\nfrom conans.search.query_parse import infix_to_postfix, evaluate_postfix\nfrom conans.util.files import list_folder_subdirs, load\n\n\ndef filter_outdated(packages_infos, recipe_hash):\n result = {}\n for package_id, info in packages_infos.items():\n try: # Existing package_info of old package might not 
have recipe_hash\n if info[\"recipe_hash\"] != recipe_hash:\n result[package_id] = info\n except KeyError:\n pass\n return result\n\n\ndef filter_packages(query, package_infos):\n if query is None:\n return package_infos\n try:\n if \"!\" in query:\n raise ConanException(\"'!' character is not allowed\")\n if \" not \" in query or query.startswith(\"not \"):\n raise ConanException(\"'not' operator is not allowed\")\n postfix = infix_to_postfix(query) if query else []\n result = {}\n for package_id, info in package_infos.items():\n if evaluate_postfix_with_info(postfix, info):\n result[package_id] = info\n return result\n except Exception as exc:\n raise ConanException(\"Invalid package query: %s. %s\" % (query, exc))\n\n\ndef evaluate_postfix_with_info(postfix, conan_vars_info):\n\n # Evaluate conaninfo with the expression\n\n def evaluate_info(expression):\n \"\"\"Receives an expression like compiler.version=\"12\"\n Uses conan_vars_info in the closure to evaluate it\"\"\"\n name, value = expression.split(\"=\", 1)\n value = value.replace(\"\\\"\", \"\")\n return evaluate(name, value, conan_vars_info)\n\n return evaluate_postfix(postfix, evaluate_info)\n\n\ndef evaluate(prop_name, prop_value, conan_vars_info):\n \"\"\"\n Evaluates a single prop_name, prop_value like \"os\", \"Windows\" against conan_vars_info.serialize_min()\n \"\"\"\n\n def compatible_prop(setting_value, prop_value):\n return setting_value is None or prop_value == setting_value\n\n info_settings = conan_vars_info.get(\"settings\", [])\n info_options = conan_vars_info.get(\"options\", [])\n\n if prop_name in [\"os\", \"compiler\", \"arch\", \"build_type\"] or prop_name.startswith(\"compiler.\"):\n return compatible_prop(info_settings.get(prop_name, None), prop_value)\n else:\n return compatible_prop(info_options.get(prop_name, None), prop_value)\n return False\n\n\ndef search_recipes(paths, pattern=None, ignorecase=True):\n # Conan references in main storage\n if pattern:\n if isinstance(pattern, ConanFileReference):\n pattern = str(pattern)\n pattern = translate(pattern)\n pattern = re.compile(pattern, re.IGNORECASE) if ignorecase else re.compile(pattern)\n\n subdirs = list_folder_subdirs(basedir=paths.store, level=4)\n if not pattern:\n return sorted([ConanFileReference(*folder.split(\"/\")) for folder in subdirs])\n else:\n ret = []\n for subdir in subdirs:\n conan_ref = ConanFileReference(*subdir.split(\"/\"))\n if pattern:\n if _partial_match(pattern, conan_ref):\n ret.append(conan_ref)\n\n return sorted(ret)\n\n\ndef _partial_match(pattern, conan_ref):\n \"\"\"\n Finds if pattern matches any of partial sums of tokens of conan reference\n \"\"\"\n \n tokens = str(conan_ref).replace('/', ' / ').replace('@', ' @ ').split()\n\n def partial_sums(iterable):\n sum = ''\n for i in iterable:\n sum += i\n yield sum\n\n return any(map(pattern.match, list(partial_sums(tokens))))\n\ndef search_packages(paths, reference, query):\n \"\"\" Return a dict like this:\n\n {package_ID: {name: \"OpenCV\",\n version: \"2.14\",\n settings: {os: Windows}}}\n param conan_ref: ConanFileReference object\n \"\"\"\n infos = _get_local_infos_min(paths, reference)\n return filter_packages(query, infos)\n\n\ndef _get_local_infos_min(paths, reference):\n result = {}\n packages_path = paths.packages(reference)\n subdirs = list_folder_subdirs(packages_path, level=1)\n for package_id in subdirs:\n # Read conaninfo\n try:\n package_reference = PackageReference(reference, package_id)\n info_path = os.path.join(paths.package(package_reference,\n 
short_paths=None), CONANINFO)\n if not os.path.exists(info_path):\n raise NotFoundException(\"\")\n conan_info_content = load(info_path)\n conan_vars_info = ConanInfo.loads(conan_info_content).serialize_min()\n result[package_id] = conan_vars_info\n\n except Exception as exc:\n logger.error(\"Package %s has no ConanInfo file\" % str(package_reference))\n if str(exc):\n logger.error(str(exc))\n\n return result\n", "path": "conans/search/search.py"}]} | 1,799 | 265 |
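Illustration (not part of the patch above): the matching behaviour that `_partial_match` enables can be sketched in isolation. The helper below mirrors the token/partial-sum logic of the accepted diff and reuses the same `fnmatch.translate` pattern handling already present in `search_recipes`; the `zlib` reference is simply the example from the issue.

```python
import re
from fnmatch import translate


def _partial_sums(tokens):
    # Yields "zlib", "zlib/", "zlib/1.2.8", "zlib/1.2.8@", ... for a full reference.
    total = ""
    for token in tokens:
        total += token
        yield total


def partial_match(pattern, reference):
    # Same token split as the _partial_match helper added by the patch:
    # separators are kept so every partial sum stays a valid reference prefix.
    tokens = reference.replace("/", " / ").replace("@", " @ ").split()
    return any(pattern.match(candidate) for candidate in _partial_sums(tokens))


pattern = re.compile(translate("zlib"), re.IGNORECASE)
print(partial_match(pattern, "zlib/1.2.8@conan/stable"))   # True:  "zlib" now matches
print(partial_match(pattern, "bzip2/1.0.6@conan/stable"))  # False: unrelated reference
```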
gh_patches_debug_16835 | rasdani/github-patches | git_diff | ESMCI__cime-538 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PET tests do not work on Skybridge
Skybridge immediately fails the single-threaded case because it tries to use 16 procs per node while the sbatch submission only requested 8 procs per node.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/python/CIME/SystemTests/pet.py`
Content:
```
1 """
2 Implementation of the CIME PET test. This class inherits from SystemTestsCommon
3
4 This is an openmp test to determine that changing thread counts does not change answers.
5 (1) do an initial run where all components are threaded by default (suffix: base)
6 (2) do another initial run with nthrds=1 for all components (suffix: single_thread)
7 """
8
9 from CIME.XML.standard_module_setup import *
10 from CIME.case_setup import case_setup
11 from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
12
13 logger = logging.getLogger(__name__)
14
15 class PET(SystemTestsCompareTwo):
16
17 _COMPONENT_LIST = ('ATM','CPL','OCN','WAV','GLC','ICE','ROF','LND')
18
19 def __init__(self, case):
20 """
21 initialize a test object
22 """
23 SystemTestsCompareTwo.__init__(self, case,
24 separate_builds = False,
25 run_two_suffix = 'single_thread',
26 run_one_description = 'default threading',
27 run_two_description = 'threads set to 1')
28
29 def _case_one_setup(self):
30 # first make sure that all components have threaded settings
31 for comp in self._COMPONENT_LIST:
32 if self._case.get_value("NTHRDS_%s"%comp) <= 1:
33 self._case.set_value("NTHRDS_%s"%comp, 2)
34
35 # Need to redo case_setup because we may have changed the number of threads
36 case_setup(self._case, reset=True)
37
38 def _case_two_setup(self):
39 #Do a run with all threads set to 1
40 for comp in self._COMPONENT_LIST:
41 self._case.set_value("NTHRDS_%s"%comp, 1)
42
43 # Need to redo case_setup because we may have changed the number of threads
44 case_setup(self._case, reset=True)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/python/CIME/SystemTests/pet.py b/utils/python/CIME/SystemTests/pet.py
--- a/utils/python/CIME/SystemTests/pet.py
+++ b/utils/python/CIME/SystemTests/pet.py
@@ -40,5 +40,14 @@
for comp in self._COMPONENT_LIST:
self._case.set_value("NTHRDS_%s"%comp, 1)
+ # The need for this is subtle. On batch systems, the entire PET test runs
+ # under a single submission and that submission is configured based on
+ # the case settings for case 1, IE 2 threads for all components. This causes
+ # the procs-per-node to be half of what it would be for single thread. On some
+ # machines, if the mpiexec tries to exceed the procs-per-node that were given
+ # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of
+ # it original value prevents this.
+ self._case.set_value("MAX_TASKS_PER_NODE", self._case.get_value("MAX_TASKS_PER_NODE") / 2)
+
# Need to redo case_setup because we may have changed the number of threads
case_setup(self._case, reset=True)
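Illustration (not part of the patch): the arithmetic behind halving `MAX_TASKS_PER_NODE`, using the numbers reported for Skybridge in the issue (16 tasks per node available, 8 requested). The values below are illustrative, not read from an actual case.

```python
# Numbers from the Skybridge report in the issue; illustrative only.
max_tasks_per_node = 16      # machine-wide MAX_TASKS_PER_NODE
threads_in_case_one = 2      # PET case 1 forces at least 2 threads per component

# The single batch submission is sized for case 1, so it only asks for
# 16 / 2 = 8 MPI tasks per node.
tasks_per_node_requested = max_tasks_per_node // threads_in_case_one   # 8

# Case 2 (all threads set to 1) would otherwise try to place 16 tasks per
# node inside that 8-task allocation, which Skybridge rejects.  Halving
# MAX_TASKS_PER_NODE keeps case 2 within what was originally requested.
assert max_tasks_per_node // 2 == tasks_per_node_requested
```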
| {"golden_diff": "diff --git a/utils/python/CIME/SystemTests/pet.py b/utils/python/CIME/SystemTests/pet.py\n--- a/utils/python/CIME/SystemTests/pet.py\n+++ b/utils/python/CIME/SystemTests/pet.py\n@@ -40,5 +40,14 @@\n for comp in self._COMPONENT_LIST:\n self._case.set_value(\"NTHRDS_%s\"%comp, 1)\n \n+ # The need for this is subtle. On batch systems, the entire PET test runs\n+ # under a single submission and that submission is configured based on\n+ # the case settings for case 1, IE 2 threads for all components. This causes\n+ # the procs-per-node to be half of what it would be for single thread. On some\n+ # machines, if the mpiexec tries to exceed the procs-per-node that were given\n+ # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of\n+ # it original value prevents this.\n+ self._case.set_value(\"MAX_TASKS_PER_NODE\", self._case.get_value(\"MAX_TASKS_PER_NODE\") / 2)\n+\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n", "issue": "PET tests do not work on skybridge\nSkybridge insta-fails the single-threaded case because it tries to use 16 procs-per-node and the sbatch only requested 8 ppn.\n\n", "before_files": [{"content": "\"\"\"\nImplementation of the CIME PET test. This class inherits from SystemTestsCommon\n\nThis is an openmp test to determine that changing thread counts does not change answers.\n(1) do an initial run where all components are threaded by default (suffix: base)\n(2) do another initial run with nthrds=1 for all components (suffix: single_thread)\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.case_setup import case_setup\nfrom CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\n\nlogger = logging.getLogger(__name__)\n\nclass PET(SystemTestsCompareTwo):\n\n _COMPONENT_LIST = ('ATM','CPL','OCN','WAV','GLC','ICE','ROF','LND')\n\n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n SystemTestsCompareTwo.__init__(self, case,\n separate_builds = False,\n run_two_suffix = 'single_thread',\n run_one_description = 'default threading',\n run_two_description = 'threads set to 1')\n\n def _case_one_setup(self):\n # first make sure that all components have threaded settings\n for comp in self._COMPONENT_LIST:\n if self._case.get_value(\"NTHRDS_%s\"%comp) <= 1:\n self._case.set_value(\"NTHRDS_%s\"%comp, 2)\n\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n\n def _case_two_setup(self):\n #Do a run with all threads set to 1\n for comp in self._COMPONENT_LIST:\n self._case.set_value(\"NTHRDS_%s\"%comp, 1)\n\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n", "path": "utils/python/CIME/SystemTests/pet.py"}], "after_files": [{"content": "\"\"\"\nImplementation of the CIME PET test. 
This class inherits from SystemTestsCommon\n\nThis is an openmp test to determine that changing thread counts does not change answers.\n(1) do an initial run where all components are threaded by default (suffix: base)\n(2) do another initial run with nthrds=1 for all components (suffix: single_thread)\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.case_setup import case_setup\nfrom CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\n\nlogger = logging.getLogger(__name__)\n\nclass PET(SystemTestsCompareTwo):\n\n _COMPONENT_LIST = ('ATM','CPL','OCN','WAV','GLC','ICE','ROF','LND')\n\n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n SystemTestsCompareTwo.__init__(self, case,\n separate_builds = False,\n run_two_suffix = 'single_thread',\n run_one_description = 'default threading',\n run_two_description = 'threads set to 1')\n\n def _case_one_setup(self):\n # first make sure that all components have threaded settings\n for comp in self._COMPONENT_LIST:\n if self._case.get_value(\"NTHRDS_%s\"%comp) <= 1:\n self._case.set_value(\"NTHRDS_%s\"%comp, 2)\n\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n\n def _case_two_setup(self):\n #Do a run with all threads set to 1\n for comp in self._COMPONENT_LIST:\n self._case.set_value(\"NTHRDS_%s\"%comp, 1)\n\n # The need for this is subtle. On batch systems, the entire PET test runs\n # under a single submission and that submission is configured based on\n # the case settings for case 1, IE 2 threads for all components. This causes\n # the procs-per-node to be half of what it would be for single thread. On some\n # machines, if the mpiexec tries to exceed the procs-per-node that were given\n # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of\n # it original value prevents this.\n self._case.set_value(\"MAX_TASKS_PER_NODE\", self._case.get_value(\"MAX_TASKS_PER_NODE\") / 2)\n\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n", "path": "utils/python/CIME/SystemTests/pet.py"}]} | 790 | 279 |
gh_patches_debug_53979 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1261 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove duplicated libraries in setup.py
# Description
Some libraries are duplicated between `setup.py` and `setup.cfg`; the duplicates should be removed from `setup.py`.
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L47
already exists as a core requirement in `setup.cfg`
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.cfg#L45
and so should be removed from `setup.py`.
It also isn't clear if
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L42
is still required, given that it was added back in PR #186 when we still used Coveralls for coverage.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
7 'tensorflow-probability~=0.10.0',
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],
11 'xmlio': [
12 'uproot3~=3.14',
13 'uproot~=4.0',
14 ], # uproot3 required until writing to ROOT supported in uproot4
15 'minuit': ['iminuit~=2.1'],
16 }
17 extras_require['backends'] = sorted(
18 set(
19 extras_require['tensorflow']
20 + extras_require['torch']
21 + extras_require['jax']
22 + extras_require['minuit']
23 )
24 )
25 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
26 extras_require['lint'] = sorted({'flake8', 'black'})
27
28 extras_require['test'] = sorted(
29 set(
30 extras_require['backends']
31 + extras_require['xmlio']
32 + extras_require['contrib']
33 + extras_require['shellcomplete']
34 + [
35 'pytest~=6.0',
36 'pytest-cov>=2.5.1',
37 'pytest-mock',
38 'pytest-benchmark[histogram]',
39 'pytest-console-scripts',
40 'pytest-mpl',
41 'pydocstyle',
42 'coverage>=4.0', # coveralls
43 'papermill~=2.0',
44 'nteract-scrapbook~=0.2',
45 'jupyter',
46 'graphviz',
47 'jsonpatch',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 extras_require['xmlio']
54 + [
55 'sphinx>=3.1.2',
56 'sphinxcontrib-bibtex~=2.1',
57 'sphinx-click',
58 'sphinx_rtd_theme',
59 'nbsphinx',
60 'ipywidgets',
61 'sphinx-issues',
62 'sphinx-copybutton>0.2.9',
63 ]
64 )
65 )
66 extras_require['develop'] = sorted(
67 set(
68 extras_require['docs']
69 + extras_require['lint']
70 + extras_require['test']
71 + [
72 'nbdime',
73 'bump2version',
74 'ipython',
75 'pre-commit',
76 'check-manifest',
77 'codemetapy>=0.3.4',
78 'twine',
79 ]
80 )
81 )
82 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
83
84
85 setup(
86 extras_require=extras_require,
87 use_scm_version=lambda: {'local_scheme': lambda version: ''},
88 )
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -39,12 +39,10 @@
'pytest-console-scripts',
'pytest-mpl',
'pydocstyle',
- 'coverage>=4.0', # coveralls
'papermill~=2.0',
'nteract-scrapbook~=0.2',
'jupyter',
'graphviz',
- 'jsonpatch',
]
)
)
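Illustration (not part of the patch): duplication like this can be spotted by comparing an extras list against the core requirements declared in `setup.cfg`. The sketch assumes a setuptools-style `[options] install_requires` section, as pyhf uses; the file path and helper names are placeholders.

```python
# Illustrative helper: report packages that appear both in setup.cfg's core
# requirements and in a given extras list (e.g. extras_require["test"]).
import configparser
import re


def _names(specs):
    # "coverage>=4.0" -> "coverage", "pytest~=6.0" -> "pytest", etc.
    return {re.split(r"[<>=~!\[ ]", spec.strip(), maxsplit=1)[0].lower()
            for spec in specs if spec.strip()}


def duplicated_requirements(extras, setup_cfg_path="setup.cfg"):
    parser = configparser.ConfigParser()
    parser.read(setup_cfg_path)
    core = _names(parser.get("options", "install_requires", fallback="").splitlines())
    return sorted(_names(extras) & core)


# For pyhf this would have flagged "jsonpatch", which setup.cfg already lists
# as a core requirement according to the issue.
```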
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,12 +39,10 @@\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n- 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n- 'jsonpatch',\n ]\n )\n )\n", "issue": "Remove duplicated libraries in setup.py\n# Description\r\n\r\nIn `setup.py` and `setup.cfg` there are some duplicated libraries that should be removed from `setup.py`.\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L47\r\n\r\nalready exists as a core requirement in `setup.cfg`\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.cfg#L45\r\n\r\nand so should be removed from `setup.py`.\r\n\r\nIt also isn't clear if \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L42\r\n\r\nis still required, given that it was added back in PR #186 when we still used Coveralls for coverage.\r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + 
extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,280 | 109 |
gh_patches_debug_28166 | rasdani/github-patches | git_diff | svthalia__concrexit-1818 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add payment_type or full payment to event admin API
### Motivation
`api/v2/admin/events/<eventPk>/registrations/` currently only gives the uuid of a payment, so to show in the admin screen how a registration was paid, each payment must be requested separately. Doing this for all registrations would be very inefficient (roughly 40 extra requests just to load the event admin). If we simply add the payment_type, or replace the payment uuid with a full payment serializer, this becomes much simpler.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/api/v2/serializers/event_registration.py`
Content:
```
1 from rest_framework import serializers
2
3 from events.models import EventRegistration
4 from members.api.v2.serializers.member import MemberSerializer
5
6
7 class EventRegistrationSerializer(serializers.ModelSerializer):
8 """Serializer for event registrations."""
9
10 def __init__(self, *args, **kwargs):
11 # Don't pass the 'fields' arg up to the superclass
12 fields = kwargs.pop("fields", {"pk", "member", "name"})
13
14 # Instantiate the superclass normally
15 super().__init__(*args, **kwargs)
16
17 allowed = set(fields)
18 existing = set(self.fields.keys())
19 for field_name in existing - allowed:
20 self.fields.pop(field_name)
21
22 class Meta:
23 model = EventRegistration
24 fields = (
25 "pk",
26 "present",
27 "queue_position",
28 "date",
29 "payment",
30 "member",
31 "name",
32 )
33
34 member = MemberSerializer(detailed=False, read_only=True)
35
```
Path: `website/events/api/v2/admin/serializers/event_registration.py`
Content:
```
1 from rest_framework import serializers
2
3 from events.models import EventRegistration
4 from members.api.v2.serializers.member import MemberSerializer
5 from members.models import Member
6
7
8 class EventRegistrationAdminSerializer(serializers.ModelSerializer):
9 """Serializer for event registrations."""
10
11 class Meta:
12 model = EventRegistration
13 fields = (
14 "pk",
15 "present",
16 "queue_position",
17 "date",
18 "date_cancelled",
19 "payment",
20 "member",
21 "name",
22 )
23 read_only_fields = ("payment",)
24
25 def to_internal_value(self, data):
26 self.fields["member"] = serializers.PrimaryKeyRelatedField(
27 queryset=Member.objects.all()
28 )
29 return super().to_internal_value(data)
30
31 def to_representation(self, instance):
32 self.fields["member"] = MemberSerializer(detailed=False, read_only=True)
33 return super().to_representation(instance)
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/events/api/v2/admin/serializers/event_registration.py b/website/events/api/v2/admin/serializers/event_registration.py
--- a/website/events/api/v2/admin/serializers/event_registration.py
+++ b/website/events/api/v2/admin/serializers/event_registration.py
@@ -3,6 +3,7 @@
from events.models import EventRegistration
from members.api.v2.serializers.member import MemberSerializer
from members.models import Member
+from payments.api.v2.serializers import PaymentSerializer
class EventRegistrationAdminSerializer(serializers.ModelSerializer):
@@ -22,6 +23,8 @@
)
read_only_fields = ("payment",)
+ payment = PaymentSerializer()
+
def to_internal_value(self, data):
self.fields["member"] = serializers.PrimaryKeyRelatedField(
queryset=Member.objects.all()
diff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py
--- a/website/events/api/v2/serializers/event_registration.py
+++ b/website/events/api/v2/serializers/event_registration.py
@@ -2,6 +2,7 @@
from events.models import EventRegistration
from members.api.v2.serializers.member import MemberSerializer
+from payments.api.v2.serializers import PaymentSerializer
class EventRegistrationSerializer(serializers.ModelSerializer):
@@ -31,4 +32,5 @@
"name",
)
+ payment = PaymentSerializer()
member = MemberSerializer(detailed=False, read_only=True)
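Illustration (not part of the patch): with `PaymentSerializer` nested, a registration is serialized with the payment expanded in place of a bare uuid, so the admin screen no longer needs a follow-up request per registration. The field names below are hypothetical; the real shape is whatever concrexit's `PaymentSerializer` emits.

```python
# Schematic response shapes; all field values are made up for illustration.
registration_before = {
    "pk": 42,
    "member": {"pk": 7, "display_name": "..."},
    "payment": "0b896276-0000-0000-0000-000000000000",   # uuid only
}

registration_after = {
    "pk": 42,
    "member": {"pk": 7, "display_name": "..."},
    "payment": {                                          # nested PaymentSerializer output
        "pk": "0b896276-0000-0000-0000-000000000000",
        "type": "card_payment",                           # hypothetical field/value
        "amount": "7.50",
    },
}
```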
| {"golden_diff": "diff --git a/website/events/api/v2/admin/serializers/event_registration.py b/website/events/api/v2/admin/serializers/event_registration.py\n--- a/website/events/api/v2/admin/serializers/event_registration.py\n+++ b/website/events/api/v2/admin/serializers/event_registration.py\n@@ -3,6 +3,7 @@\n from events.models import EventRegistration\n from members.api.v2.serializers.member import MemberSerializer\n from members.models import Member\n+from payments.api.v2.serializers import PaymentSerializer\n \n \n class EventRegistrationAdminSerializer(serializers.ModelSerializer):\n@@ -22,6 +23,8 @@\n )\n read_only_fields = (\"payment\",)\n \n+ payment = PaymentSerializer()\n+\n def to_internal_value(self, data):\n self.fields[\"member\"] = serializers.PrimaryKeyRelatedField(\n queryset=Member.objects.all()\ndiff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py\n--- a/website/events/api/v2/serializers/event_registration.py\n+++ b/website/events/api/v2/serializers/event_registration.py\n@@ -2,6 +2,7 @@\n \n from events.models import EventRegistration\n from members.api.v2.serializers.member import MemberSerializer\n+from payments.api.v2.serializers import PaymentSerializer\n \n \n class EventRegistrationSerializer(serializers.ModelSerializer):\n@@ -31,4 +32,5 @@\n \"name\",\n )\n \n+ payment = PaymentSerializer()\n member = MemberSerializer(detailed=False, read_only=True)\n", "issue": "Add payment_type or full payment to event admin API\n### Motivation\r\n`api/v2/admin/events/<eventPk>/registrations/` currently only gives the uuid of a payment, so to display in the admin screen how it was paid, the payment must be requested separately. Doing this for all of the registrations would be very inefficient (like 40 extra requests to load the event admin). 
If we simply add the payment_type or replace the payment uuid with a payment serializer, it will be much simpler.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\n\n\nclass EventRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop(\"fields\", {\"pk\", \"member\", \"name\"})\n\n # Instantiate the superclass normally\n super().__init__(*args, **kwargs)\n\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n \"member\",\n \"name\",\n )\n\n member = MemberSerializer(detailed=False, read_only=True)\n", "path": "website/events/api/v2/serializers/event_registration.py"}, {"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\nfrom members.models import Member\n\n\nclass EventRegistrationAdminSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"date_cancelled\",\n \"payment\",\n \"member\",\n \"name\",\n )\n read_only_fields = (\"payment\",)\n\n def to_internal_value(self, data):\n self.fields[\"member\"] = serializers.PrimaryKeyRelatedField(\n queryset=Member.objects.all()\n )\n return super().to_internal_value(data)\n\n def to_representation(self, instance):\n self.fields[\"member\"] = MemberSerializer(detailed=False, read_only=True)\n return super().to_representation(instance)\n", "path": "website/events/api/v2/admin/serializers/event_registration.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\nfrom payments.api.v2.serializers import PaymentSerializer\n\n\nclass EventRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop(\"fields\", {\"pk\", \"member\", \"name\"})\n\n # Instantiate the superclass normally\n super().__init__(*args, **kwargs)\n\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n \"member\",\n \"name\",\n )\n\n payment = PaymentSerializer()\n member = MemberSerializer(detailed=False, read_only=True)\n", "path": "website/events/api/v2/serializers/event_registration.py"}, {"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\nfrom members.models import Member\nfrom payments.api.v2.serializers import PaymentSerializer\n\n\nclass EventRegistrationAdminSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n class Meta:\n model = EventRegistration\n fields = (\n 
\"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"date_cancelled\",\n \"payment\",\n \"member\",\n \"name\",\n )\n read_only_fields = (\"payment\",)\n\n payment = PaymentSerializer()\n\n def to_internal_value(self, data):\n self.fields[\"member\"] = serializers.PrimaryKeyRelatedField(\n queryset=Member.objects.all()\n )\n return super().to_internal_value(data)\n\n def to_representation(self, instance):\n self.fields[\"member\"] = MemberSerializer(detailed=False, read_only=True)\n return super().to_representation(instance)\n", "path": "website/events/api/v2/admin/serializers/event_registration.py"}]} | 895 | 329 |
gh_patches_debug_16215 | rasdani/github-patches | git_diff | sktime__sktime-1705 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC] transformers tutorial
There should be a notebook that explains the different transformer scitypes, and how transformers work in `sktime`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/datatypes/_series/_examples.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Example generation for testing.
3
4 Exports dict of examples, useful for testing as fixtures.
5
6 example_dict: dict indexed by triple
7 1st element = mtype - str
8 2nd element = considered as this scitype - str
9 3rd element = int - index of example
10 elements are data objects, considered examples for the mtype
11 all examples with same index are considered "same" on scitype content
12 if None, indicates that representation is not possible
13
14 example_lossy: dict of bool indexed by triple
15 1st element = mtype - str
16 2nd element = considered as this scitype - str
17 3rd element = int - index of example
18 elements are bool, indicate whether representation has information removed
19 all examples with same index are considered "same" on scitype content
20
21 example_metadata: dict of metadata dict, indexed by pair
22 1st element = considered as this scitype - str
23 2nd element = int - index of example
24 (there is no "mtype" element, as properties are equal for all mtypes)
25 elements are metadata dict, as returned by check_is_mtype
26 used as expected return of check_is_mtype in tests
27
28 overall, conversions from non-lossy representations to any other ones
29 should yield the element exactly, identidally (given same index)
30 """
31
32 import numpy as np
33 import pandas as pd
34
35 from sktime.utils.validation._dependencies import _check_soft_dependencies
36
37 example_dict = dict()
38 example_dict_lossy = dict()
39 example_dict_metadata = dict()
40
41 ###
42 # example 0: univariate
43
44 s = pd.Series([1, 4, 0.5, -3], dtype=np.float64, name="a")
45
46 example_dict[("pd.Series", "Series", 0)] = s
47 example_dict_lossy[("pd.Series", "Series", 0)] = False
48
49 df = pd.DataFrame({"a": [1, 4, 0.5, -3]})
50
51 example_dict[("pd.DataFrame", "Series", 0)] = df
52 example_dict_lossy[("pd.DataFrame", "Series", 0)] = False
53
54 arr = np.array([[1], [4], [0.5], [-3]])
55
56 example_dict[("np.ndarray", "Series", 0)] = arr
57 example_dict_lossy[("np.ndarray", "Series", 0)] = True
58
59 if _check_soft_dependencies("xarray", severity="none"):
60 import xarray as xr
61
62 da = xr.DataArray(
63 [[1], [4], [0.5], [-3]],
64 coords=[[0, 1, 2, 3], ["a"]],
65 )
66
67 example_dict[("xr.DataArray", "Series", 0)] = da
68 example_dict_lossy[("xr.DataArray", "Series", 0)] = False
69
70
71 example_dict_metadata[("Series", 0)] = {
72 "is_univariate": True,
73 "is_equally_spaced": True,
74 "is_empty": False,
75 "has_nans": False,
76 }
77
78 ###
79 # example 1: multivariate
80
81 example_dict[("pd.Series", "Series", 1)] = None
82 example_dict_lossy[("pd.Series", "Series", 1)] = None
83
84 df = pd.DataFrame({"a": [1, 4, 0.5, -3], "b": [3, 7, 2, -3 / 7]})
85
86 example_dict[("pd.DataFrame", "Series", 1)] = df
87 example_dict_lossy[("pd.DataFrame", "Series", 1)] = False
88
89 arr = np.array([[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]])
90
91 example_dict[("np.ndarray", "Series", 1)] = arr
92 example_dict_lossy[("np.ndarray", "Series", 1)] = True
93 if _check_soft_dependencies("xarray", severity="none"):
94 import xarray as xr
95
96 da = xr.DataArray(
97 [[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]],
98 coords=[[0, 1, 2, 3], ["a", "b"]],
99 )
100
101 example_dict[("xr.DataArray", "Series", 1)] = da
102 example_dict_lossy[("xr.DataArray", "Series", 1)] = False
103
104 example_dict_metadata[("Series", 1)] = {
105 "is_univariate": False,
106 "is_equally_spaced": True,
107 "is_empty": False,
108 "has_nans": False,
109 }
110
111 ###
112 # example 2: multivariate, positive
113
114 example_dict[("pd.Series", "Series", 2)] = None
115 example_dict_lossy[("pd.Series", "Series", 2)] = None
116
117 df = pd.DataFrame({"a": [1, 4, 0.5, 3], "b": [3, 7, 2, 3 / 7]})
118
119 example_dict[("pd.DataFrame", "Series", 2)] = df
120 example_dict_lossy[("pd.DataFrame", "Series", 2)] = False
121
122 arr = np.array([[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]])
123
124 example_dict[("np.ndarray", "Series", 2)] = arr
125 example_dict_lossy[("np.ndarray", "Series", 2)] = True
126
127 if _check_soft_dependencies("xarray", severity="none"):
128 import xarray as xr
129
130 da = xr.DataArray(
131 [[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]],
132 coords=[[0, 1, 2, 3], ["a", "b"]],
133 )
134
135 example_dict[("xr.DataArray", "Series", 2)] = da
136 example_dict_lossy[("xr.DataArray", "Series", 2)] = False
137
138
139 example_dict_metadata[("Series", 2)] = {
140 "is_univariate": False,
141 "is_equally_spaced": True,
142 "is_empty": False,
143 "has_nans": False,
144 }
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/datatypes/_series/_examples.py b/sktime/datatypes/_series/_examples.py
--- a/sktime/datatypes/_series/_examples.py
+++ b/sktime/datatypes/_series/_examples.py
@@ -142,3 +142,39 @@
"is_empty": False,
"has_nans": False,
}
+
+###
+# example 3: univariate, positive
+
+s = pd.Series([1, 4, 0.5, 3], dtype=np.float64, name="a")
+
+example_dict[("pd.Series", "Series", 3)] = s
+example_dict_lossy[("pd.Series", "Series", 3)] = False
+
+df = pd.DataFrame({"a": [1, 4, 0.5, 3]})
+
+example_dict[("pd.DataFrame", "Series", 3)] = df
+example_dict_lossy[("pd.DataFrame", "Series", 3)] = False
+
+arr = np.array([[1], [4], [0.5], [3]])
+
+example_dict[("np.ndarray", "Series", 3)] = arr
+example_dict_lossy[("np.ndarray", "Series", 3)] = True
+
+if _check_soft_dependencies("xarray", severity="none"):
+ import xarray as xr
+
+ da = xr.DataArray(
+ [[1], [4], [0.5], [3]],
+ coords=[[0, 1, 2, 3], ["a"]],
+ )
+
+ example_dict[("xr.DataArray", "Series", 3)] = da
+ example_dict_lossy[("xr.DataArray", "Series", 3)] = False
+
+example_dict_metadata[("Series", 3)] = {
+ "is_univariate": True,
+ "is_equally_spaced": True,
+ "is_empty": False,
+ "has_nans": False,
+}
| {"golden_diff": "diff --git a/sktime/datatypes/_series/_examples.py b/sktime/datatypes/_series/_examples.py\n--- a/sktime/datatypes/_series/_examples.py\n+++ b/sktime/datatypes/_series/_examples.py\n@@ -142,3 +142,39 @@\n \"is_empty\": False,\n \"has_nans\": False,\n }\n+\n+###\n+# example 3: univariate, positive\n+\n+s = pd.Series([1, 4, 0.5, 3], dtype=np.float64, name=\"a\")\n+\n+example_dict[(\"pd.Series\", \"Series\", 3)] = s\n+example_dict_lossy[(\"pd.Series\", \"Series\", 3)] = False\n+\n+df = pd.DataFrame({\"a\": [1, 4, 0.5, 3]})\n+\n+example_dict[(\"pd.DataFrame\", \"Series\", 3)] = df\n+example_dict_lossy[(\"pd.DataFrame\", \"Series\", 3)] = False\n+\n+arr = np.array([[1], [4], [0.5], [3]])\n+\n+example_dict[(\"np.ndarray\", \"Series\", 3)] = arr\n+example_dict_lossy[(\"np.ndarray\", \"Series\", 3)] = True\n+\n+if _check_soft_dependencies(\"xarray\", severity=\"none\"):\n+ import xarray as xr\n+\n+ da = xr.DataArray(\n+ [[1], [4], [0.5], [3]],\n+ coords=[[0, 1, 2, 3], [\"a\"]],\n+ )\n+\n+ example_dict[(\"xr.DataArray\", \"Series\", 3)] = da\n+ example_dict_lossy[(\"xr.DataArray\", \"Series\", 3)] = False\n+\n+example_dict_metadata[(\"Series\", 3)] = {\n+ \"is_univariate\": True,\n+ \"is_equally_spaced\": True,\n+ \"is_empty\": False,\n+ \"has_nans\": False,\n+}\n", "issue": "[DOC] transformers tutorial\nThere should be a notebook that explains the different transformer scitypes, and how transformers work in `sktime`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Example generation for testing.\n\nExports dict of examples, useful for testing as fixtures.\n\nexample_dict: dict indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are data objects, considered examples for the mtype\n all examples with same index are considered \"same\" on scitype content\n if None, indicates that representation is not possible\n\nexample_lossy: dict of bool indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are bool, indicate whether representation has information removed\n all examples with same index are considered \"same\" on scitype content\n\nexample_metadata: dict of metadata dict, indexed by pair\n 1st element = considered as this scitype - str\n 2nd element = int - index of example\n (there is no \"mtype\" element, as properties are equal for all mtypes)\nelements are metadata dict, as returned by check_is_mtype\n used as expected return of check_is_mtype in tests\n\noverall, conversions from non-lossy representations to any other ones\n should yield the element exactly, identidally (given same index)\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\nexample_dict = dict()\nexample_dict_lossy = dict()\nexample_dict_metadata = dict()\n\n###\n# example 0: univariate\n\ns = pd.Series([1, 4, 0.5, -3], dtype=np.float64, name=\"a\")\n\nexample_dict[(\"pd.Series\", \"Series\", 0)] = s\nexample_dict_lossy[(\"pd.Series\", \"Series\", 0)] = False\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, -3]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 0)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 0)] = False\n\narr = np.array([[1], [4], [0.5], [-3]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 0)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 0)] = True\n\nif _check_soft_dependencies(\"xarray\", 
severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1], [4], [0.5], [-3]],\n coords=[[0, 1, 2, 3], [\"a\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 0)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 0)] = False\n\n\nexample_dict_metadata[(\"Series\", 0)] = {\n \"is_univariate\": True,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n\n###\n# example 1: multivariate\n\nexample_dict[(\"pd.Series\", \"Series\", 1)] = None\nexample_dict_lossy[(\"pd.Series\", \"Series\", 1)] = None\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, -3], \"b\": [3, 7, 2, -3 / 7]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 1)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 1)] = False\n\narr = np.array([[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 1)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 1)] = True\nif _check_soft_dependencies(\"xarray\", severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]],\n coords=[[0, 1, 2, 3], [\"a\", \"b\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 1)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 1)] = False\n\nexample_dict_metadata[(\"Series\", 1)] = {\n \"is_univariate\": False,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n\n###\n# example 2: multivariate, positive\n\nexample_dict[(\"pd.Series\", \"Series\", 2)] = None\nexample_dict_lossy[(\"pd.Series\", \"Series\", 2)] = None\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, 3], \"b\": [3, 7, 2, 3 / 7]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 2)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 2)] = False\n\narr = np.array([[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 2)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 2)] = True\n\nif _check_soft_dependencies(\"xarray\", severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]],\n coords=[[0, 1, 2, 3], [\"a\", \"b\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 2)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 2)] = False\n\n\nexample_dict_metadata[(\"Series\", 2)] = {\n \"is_univariate\": False,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n", "path": "sktime/datatypes/_series/_examples.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Example generation for testing.\n\nExports dict of examples, useful for testing as fixtures.\n\nexample_dict: dict indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are data objects, considered examples for the mtype\n all examples with same index are considered \"same\" on scitype content\n if None, indicates that representation is not possible\n\nexample_lossy: dict of bool indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are bool, indicate whether representation has information removed\n all examples with same index are considered \"same\" on scitype content\n\nexample_metadata: dict of metadata dict, indexed by pair\n 1st element = considered as this scitype - str\n 2nd element = int - index of example\n (there is no \"mtype\" element, as properties are equal for all mtypes)\nelements are metadata dict, as returned by check_is_mtype\n used as 
expected return of check_is_mtype in tests\n\noverall, conversions from non-lossy representations to any other ones\n should yield the element exactly, identidally (given same index)\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\nexample_dict = dict()\nexample_dict_lossy = dict()\nexample_dict_metadata = dict()\n\n###\n# example 0: univariate\n\ns = pd.Series([1, 4, 0.5, -3], dtype=np.float64, name=\"a\")\n\nexample_dict[(\"pd.Series\", \"Series\", 0)] = s\nexample_dict_lossy[(\"pd.Series\", \"Series\", 0)] = False\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, -3]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 0)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 0)] = False\n\narr = np.array([[1], [4], [0.5], [-3]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 0)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 0)] = True\n\nif _check_soft_dependencies(\"xarray\", severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1], [4], [0.5], [-3]],\n coords=[[0, 1, 2, 3], [\"a\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 0)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 0)] = False\n\n\nexample_dict_metadata[(\"Series\", 0)] = {\n \"is_univariate\": True,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n\n###\n# example 1: multivariate\n\nexample_dict[(\"pd.Series\", \"Series\", 1)] = None\nexample_dict_lossy[(\"pd.Series\", \"Series\", 1)] = None\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, -3], \"b\": [3, 7, 2, -3 / 7]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 1)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 1)] = False\n\narr = np.array([[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 1)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 1)] = True\nif _check_soft_dependencies(\"xarray\", severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]],\n coords=[[0, 1, 2, 3], [\"a\", \"b\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 1)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 1)] = False\n\nexample_dict_metadata[(\"Series\", 1)] = {\n \"is_univariate\": False,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n\n###\n# example 2: multivariate, positive\n\nexample_dict[(\"pd.Series\", \"Series\", 2)] = None\nexample_dict_lossy[(\"pd.Series\", \"Series\", 2)] = None\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, 3], \"b\": [3, 7, 2, 3 / 7]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 2)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 2)] = False\n\narr = np.array([[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 2)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 2)] = True\n\nif _check_soft_dependencies(\"xarray\", severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]],\n coords=[[0, 1, 2, 3], [\"a\", \"b\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 2)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 2)] = False\n\n\nexample_dict_metadata[(\"Series\", 2)] = {\n \"is_univariate\": False,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n\n###\n# example 3: univariate, positive\n\ns = pd.Series([1, 4, 0.5, 3], dtype=np.float64, name=\"a\")\n\nexample_dict[(\"pd.Series\", \"Series\", 3)] = s\nexample_dict_lossy[(\"pd.Series\", 
\"Series\", 3)] = False\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, 3]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 3)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 3)] = False\n\narr = np.array([[1], [4], [0.5], [3]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 3)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 3)] = True\n\nif _check_soft_dependencies(\"xarray\", severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1], [4], [0.5], [3]],\n coords=[[0, 1, 2, 3], [\"a\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 3)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 3)] = False\n\nexample_dict_metadata[(\"Series\", 3)] = {\n \"is_univariate\": True,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n", "path": "sktime/datatypes/_series/_examples.py"}]} | 2,020 | 442 |
gh_patches_debug_8067 | rasdani/github-patches | git_diff | conda__conda-7525 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
should_bypass_proxies still an issue in 4.5.7
https://github.com/conda/conda/issues/7506#issuecomment-403811279
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/gateways/connection/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3 from functools import partial
4
5 def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):
6 # Monkey patch requests, per https://github.com/requests/requests/pull/4723
7 if url.startswith("file://"):
8 return True
9 try:
10 return should_bypass_proxies_func(url, no_proxy)
11 except TypeError:
12 # For versions of requests we shouldn't have to deal with.
13 # https://github.com/conda/conda/issues/7503
14 # https://github.com/conda/conda/issues/7506
15 return should_bypass_proxies_func(url)
16
17
18 try:
19 from requests import ConnectionError, HTTPError, Session
20 from requests.adapters import BaseAdapter, HTTPAdapter
21 from requests.auth import AuthBase, _basic_auth_str
22 from requests.cookies import extract_cookies_to_jar
23 from requests.exceptions import InvalidSchema, SSLError
24 from requests.hooks import dispatch_hook
25 from requests.models import Response
26 from requests.packages.urllib3.exceptions import InsecureRequestWarning
27 from requests.structures import CaseInsensitiveDict
28 from requests.utils import get_auth_from_url, get_netrc_auth
29
30 # monkeypatch requests
31 from requests.utils import should_bypass_proxies
32 import requests.utils
33 requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,
34 should_bypass_proxies)
35 except ImportError: # pragma: no cover
36 from pip._vendor.requests import ConnectionError, HTTPError, Session
37 from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
38 from pip._vendor.requests.auth import AuthBase, _basic_auth_str
39 from pip._vendor.requests.cookies import extract_cookies_to_jar
40 from pip._vendor.requests.exceptions import InvalidSchema, SSLError
41 from pip._vendor.requests.hooks import dispatch_hook
42 from pip._vendor.requests.models import Response
43 from pip._vendor.requests.packages.urllib3.exceptions import InsecureRequestWarning
44 from pip._vendor.requests.structures import CaseInsensitiveDict
45 from pip._vendor.requests.utils import get_auth_from_url, get_netrc_auth
46
47 # monkeypatch requests
48 from pip._vendor.requests.utils import should_bypass_proxies
49 import pip._vendor.requests.utils
50 pip._vendor.requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,
51 should_bypass_proxies)
52
53
54 dispatch_hook = dispatch_hook
55 BaseAdapter = BaseAdapter
56 Response = Response
57 CaseInsensitiveDict = CaseInsensitiveDict
58 Session = Session
59 HTTPAdapter = HTTPAdapter
60 AuthBase = AuthBase
61 _basic_auth_str = _basic_auth_str
62 extract_cookies_to_jar = extract_cookies_to_jar
63 get_auth_from_url = get_auth_from_url
64 get_netrc_auth = get_netrc_auth
65 ConnectionError = ConnectionError
66 HTTPError = HTTPError
67 InvalidSchema = InvalidSchema
68 SSLError = SSLError
69 InsecureRequestWarning = InsecureRequestWarning
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/gateways/connection/__init__.py b/conda/gateways/connection/__init__.py
--- a/conda/gateways/connection/__init__.py
+++ b/conda/gateways/connection/__init__.py
@@ -2,7 +2,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import partial
-def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):
+def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy=None):
# Monkey patch requests, per https://github.com/requests/requests/pull/4723
if url.startswith("file://"):
return True
| {"golden_diff": "diff --git a/conda/gateways/connection/__init__.py b/conda/gateways/connection/__init__.py\n--- a/conda/gateways/connection/__init__.py\n+++ b/conda/gateways/connection/__init__.py\n@@ -2,7 +2,7 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n from functools import partial\n \n-def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):\n+def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy=None):\n # Monkey patch requests, per https://github.com/requests/requests/pull/4723\n if url.startswith(\"file://\"):\n return True\n", "issue": "should_bypass_proxies still an issue in 4.5.7\nhttps://github.com/conda/conda/issues/7506#issuecomment-403811279\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom functools import partial\n\ndef should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):\n # Monkey patch requests, per https://github.com/requests/requests/pull/4723\n if url.startswith(\"file://\"):\n return True\n try:\n return should_bypass_proxies_func(url, no_proxy)\n except TypeError:\n # For versions of requests we shouldn't have to deal with.\n # https://github.com/conda/conda/issues/7503\n # https://github.com/conda/conda/issues/7506\n return should_bypass_proxies_func(url)\n\n\ntry:\n from requests import ConnectionError, HTTPError, Session\n from requests.adapters import BaseAdapter, HTTPAdapter\n from requests.auth import AuthBase, _basic_auth_str\n from requests.cookies import extract_cookies_to_jar\n from requests.exceptions import InvalidSchema, SSLError\n from requests.hooks import dispatch_hook\n from requests.models import Response\n from requests.packages.urllib3.exceptions import InsecureRequestWarning\n from requests.structures import CaseInsensitiveDict\n from requests.utils import get_auth_from_url, get_netrc_auth\n\n # monkeypatch requests\n from requests.utils import should_bypass_proxies\n import requests.utils\n requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,\n should_bypass_proxies)\nexcept ImportError: # pragma: no cover\n from pip._vendor.requests import ConnectionError, HTTPError, Session\n from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter\n from pip._vendor.requests.auth import AuthBase, _basic_auth_str\n from pip._vendor.requests.cookies import extract_cookies_to_jar\n from pip._vendor.requests.exceptions import InvalidSchema, SSLError\n from pip._vendor.requests.hooks import dispatch_hook\n from pip._vendor.requests.models import Response\n from pip._vendor.requests.packages.urllib3.exceptions import InsecureRequestWarning\n from pip._vendor.requests.structures import CaseInsensitiveDict\n from pip._vendor.requests.utils import get_auth_from_url, get_netrc_auth\n\n # monkeypatch requests\n from pip._vendor.requests.utils import should_bypass_proxies\n import pip._vendor.requests.utils\n pip._vendor.requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,\n should_bypass_proxies)\n\n\ndispatch_hook = dispatch_hook\nBaseAdapter = BaseAdapter\nResponse = Response\nCaseInsensitiveDict = CaseInsensitiveDict\nSession = Session\nHTTPAdapter = HTTPAdapter\nAuthBase = AuthBase\n_basic_auth_str = _basic_auth_str\nextract_cookies_to_jar = extract_cookies_to_jar\nget_auth_from_url = get_auth_from_url\nget_netrc_auth = get_netrc_auth\nConnectionError = ConnectionError\nHTTPError = 
HTTPError\nInvalidSchema = InvalidSchema\nSSLError = SSLError\nInsecureRequestWarning = InsecureRequestWarning\n", "path": "conda/gateways/connection/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom functools import partial\n\ndef should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy=None):\n # Monkey patch requests, per https://github.com/requests/requests/pull/4723\n if url.startswith(\"file://\"):\n return True\n try:\n return should_bypass_proxies_func(url, no_proxy)\n except TypeError:\n # For versions of requests we shouldn't have to deal with.\n # https://github.com/conda/conda/issues/7503\n # https://github.com/conda/conda/issues/7506\n return should_bypass_proxies_func(url)\n\n\ntry:\n from requests import ConnectionError, HTTPError, Session\n from requests.adapters import BaseAdapter, HTTPAdapter\n from requests.auth import AuthBase, _basic_auth_str\n from requests.cookies import extract_cookies_to_jar\n from requests.exceptions import InvalidSchema, SSLError\n from requests.hooks import dispatch_hook\n from requests.models import Response\n from requests.packages.urllib3.exceptions import InsecureRequestWarning\n from requests.structures import CaseInsensitiveDict\n from requests.utils import get_auth_from_url, get_netrc_auth\n\n # monkeypatch requests\n from requests.utils import should_bypass_proxies\n import requests.utils\n requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,\n should_bypass_proxies)\nexcept ImportError: # pragma: no cover\n from pip._vendor.requests import ConnectionError, HTTPError, Session\n from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter\n from pip._vendor.requests.auth import AuthBase, _basic_auth_str\n from pip._vendor.requests.cookies import extract_cookies_to_jar\n from pip._vendor.requests.exceptions import InvalidSchema, SSLError\n from pip._vendor.requests.hooks import dispatch_hook\n from pip._vendor.requests.models import Response\n from pip._vendor.requests.packages.urllib3.exceptions import InsecureRequestWarning\n from pip._vendor.requests.structures import CaseInsensitiveDict\n from pip._vendor.requests.utils import get_auth_from_url, get_netrc_auth\n\n # monkeypatch requests\n from pip._vendor.requests.utils import should_bypass_proxies\n import pip._vendor.requests.utils\n pip._vendor.requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,\n should_bypass_proxies)\n\n\ndispatch_hook = dispatch_hook\nBaseAdapter = BaseAdapter\nResponse = Response\nCaseInsensitiveDict = CaseInsensitiveDict\nSession = Session\nHTTPAdapter = HTTPAdapter\nAuthBase = AuthBase\n_basic_auth_str = _basic_auth_str\nextract_cookies_to_jar = extract_cookies_to_jar\nget_auth_from_url = get_auth_from_url\nget_netrc_auth = get_netrc_auth\nConnectionError = ConnectionError\nHTTPError = HTTPError\nInvalidSchema = InvalidSchema\nSSLError = SSLError\nInsecureRequestWarning = InsecureRequestWarning\n", "path": "conda/gateways/connection/__init__.py"}]} | 1,099 | 162 |
gh_patches_debug_18080 | rasdani/github-patches | git_diff | mozilla__bugbug-1251 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set label as 0 in the QANeeded model when one of the qa flags are removed
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/qaneeded.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.under_sampling import RandomUnderSampler
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features, bugzilla, feature_cleanup
13 from bugbug.model import BugModel
14
15
16 class QANeededModel(BugModel):
17 def __init__(self, lemmatization=False):
18 BugModel.__init__(self, lemmatization)
19
20 self.sampler = RandomUnderSampler(random_state=0)
21
22 feature_extractors = [
23 bug_features.has_str(),
24 bug_features.has_regression_range(),
25 bug_features.severity(),
26 bug_features.keywords({"qawanted"}),
27 bug_features.is_coverity_issue(),
28 bug_features.has_crash_signature(),
29 bug_features.has_url(),
30 bug_features.has_w3c_url(),
31 bug_features.has_github_url(),
32 bug_features.whiteboard(),
33 bug_features.patches(),
34 bug_features.landings(),
35 ]
36
37 cleanup_functions = [
38 feature_cleanup.fileref(),
39 feature_cleanup.url(),
40 feature_cleanup.synonyms(),
41 ]
42
43 self.extraction_pipeline = Pipeline(
44 [
45 (
46 "bug_extractor",
47 bug_features.BugExtractor(
48 feature_extractors,
49 cleanup_functions,
50 rollback=True,
51 rollback_when=self.rollback,
52 ),
53 ),
54 (
55 "union",
56 ColumnTransformer(
57 [
58 ("data", DictVectorizer(), "data"),
59 ("title", self.text_vectorizer(), "title"),
60 ("comments", self.text_vectorizer(), "comments"),
61 ]
62 ),
63 ),
64 ]
65 )
66
67 self.clf = xgboost.XGBClassifier(n_jobs=16)
68 self.clf.set_params(predictor="cpu_predictor")
69
70 def rollback(self, change):
71 return any(
72 change["added"].startswith(prefix)
73 for prefix in ["qawanted", "qe-verify", "qaurgent"]
74 )
75
76 def get_labels(self):
77 classes = {}
78
79 for bug_data in bugzilla.get_bugs():
80 bug_id = int(bug_data["id"])
81
82 found_qa = False
83 if any(
84 keyword.startswith(label)
85 for keyword in bug_data["keywords"]
86 for label in ["qawanted", "qe-verify", "qaurgent"]
87 ):
88 classes[bug_id] = 1
89 found_qa = True
90
91 if not found_qa:
92 for entry in bug_data["history"]:
93 for change in entry["changes"]:
94 if any(
95 change["added"].startswith(label)
96 for label in ["qawanted", "qe-verify", "qaurgent"]
97 ):
98 classes[bug_id] = 1
99 if bug_id not in classes:
100 classes[bug_id] = 0
101
102 return classes, [0, 1]
103
104 def get_feature_names(self):
105 return self.extraction_pipeline.named_steps["union"].get_feature_names()
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py
--- a/bugbug/models/qaneeded.py
+++ b/bugbug/models/qaneeded.py
@@ -91,11 +91,18 @@
if not found_qa:
for entry in bug_data["history"]:
for change in entry["changes"]:
+ if any(
+ change["removed"].startswith(label)
+ for label in ["qawanted", "qe-verify", "qaurgent"]
+ ):
+ classes[bug_id] = 0
+
if any(
change["added"].startswith(label)
for label in ["qawanted", "qe-verify", "qaurgent"]
):
classes[bug_id] = 1
+
if bug_id not in classes:
classes[bug_id] = 0
| {"golden_diff": "diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py\n--- a/bugbug/models/qaneeded.py\n+++ b/bugbug/models/qaneeded.py\n@@ -91,11 +91,18 @@\n if not found_qa:\n for entry in bug_data[\"history\"]:\n for change in entry[\"changes\"]:\n+ if any(\n+ change[\"removed\"].startswith(label)\n+ for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n+ ):\n+ classes[bug_id] = 0\n+\n if any(\n change[\"added\"].startswith(label)\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n+\n if bug_id not in classes:\n classes[bug_id] = 0\n", "issue": "Set label as 0 in the QANeeded model when one of the qa flags are removed\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass QANeededModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords({\"qawanted\"}),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors,\n cleanup_functions,\n rollback=True,\n rollback_when=self.rollback,\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def rollback(self, change):\n return any(\n change[\"added\"].startswith(prefix)\n for prefix in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n )\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data[\"id\"])\n\n found_qa = False\n if any(\n keyword.startswith(label)\n for keyword in bug_data[\"keywords\"]\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n found_qa = True\n\n if not found_qa:\n for entry in bug_data[\"history\"]:\n for change in entry[\"changes\"]:\n if any(\n change[\"added\"].startswith(label)\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n if bug_id not in classes:\n classes[bug_id] = 0\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/qaneeded.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass QANeededModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords({\"qawanted\"}),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors,\n cleanup_functions,\n rollback=True,\n rollback_when=self.rollback,\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def rollback(self, change):\n return any(\n change[\"added\"].startswith(prefix)\n for prefix in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n )\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data[\"id\"])\n\n found_qa = False\n if any(\n keyword.startswith(label)\n for keyword in bug_data[\"keywords\"]\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n found_qa = True\n\n if not found_qa:\n for entry in bug_data[\"history\"]:\n for change in entry[\"changes\"]:\n if any(\n change[\"removed\"].startswith(label)\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 0\n\n if any(\n change[\"added\"].startswith(label)\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n\n if bug_id not in classes:\n classes[bug_id] = 0\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/qaneeded.py"}]} | 1,178 | 193 |
gh_patches_debug_21022 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1983 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
localisation string for a shelf is missing
**Describe the bug**
A translation exists, but the string "Currently reading" is shown in English language
**To Reproduce**
Switch language to non-english and check book status
**Expected behavior**
Translated string used instead of English
**Screenshots**

**Instance**
https://ziurkes.group.lt
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/templatetags/shelf_tags.py`
Content:
```
1 """ Filters and tags related to shelving books """
2 from django import template
3
4 from bookwyrm import models
5 from bookwyrm.utils import cache
6
7
8 register = template.Library()
9
10
11 @register.filter(name="is_book_on_shelf")
12 def get_is_book_on_shelf(book, shelf):
13 """is a book on a shelf"""
14 return cache.get_or_set(
15 f"book-on-shelf-{book.id}-{shelf.id}",
16 lambda b, s: s.books.filter(id=b.id).exists(),
17 book,
18 shelf,
19 timeout=15552000,
20 )
21
22
23 @register.filter(name="next_shelf")
24 def get_next_shelf(current_shelf):
25 """shelf you'd use to update reading progress"""
26 if current_shelf == "to-read":
27 return "reading"
28 if current_shelf == "reading":
29 return "read"
30 if current_shelf == "read":
31 return "complete"
32 return "to-read"
33
34
35 @register.simple_tag(takes_context=True)
36 def active_shelf(context, book):
37 """check what shelf a user has a book on, if any"""
38 user = context["request"].user
39 return cache.get_or_set(
40 f"active_shelf-{user.id}-{book.id}",
41 lambda u, b: (
42 models.ShelfBook.objects.filter(
43 shelf__user=u,
44 book__parent_work__editions=b,
45 ).first()
46 or False
47 ),
48 user,
49 book,
50 timeout=15552000,
51 ) or {"book": book}
52
53
54 @register.simple_tag(takes_context=False)
55 def latest_read_through(book, user):
56 """the most recent read activity"""
57 return cache.get_or_set(
58 f"latest_read_through-{user.id}-{book.id}",
59 lambda u, b: (
60 models.ReadThrough.objects.filter(user=u, book=b, is_active=True)
61 .order_by("-start_date")
62 .first()
63 or False
64 ),
65 user,
66 book,
67 timeout=15552000,
68 )
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/templatetags/shelf_tags.py b/bookwyrm/templatetags/shelf_tags.py
--- a/bookwyrm/templatetags/shelf_tags.py
+++ b/bookwyrm/templatetags/shelf_tags.py
@@ -1,5 +1,6 @@
""" Filters and tags related to shelving books """
from django import template
+from django.utils.translation import gettext_lazy as _
from bookwyrm import models
from bookwyrm.utils import cache
@@ -32,6 +33,24 @@
return "to-read"
[email protected](name="translate_shelf_name")
+def get_translated_shelf_name(shelf):
+ """produced translated shelf nidentifierame"""
+ if not shelf:
+ return ""
+ # support obj or dict
+ identifier = shelf["identifier"] if isinstance(shelf, dict) else shelf.identifier
+ if identifier == "all":
+ return _("All books")
+ if identifier == "to-read":
+ return _("To Read")
+ if identifier == "reading":
+ return _("Currently Reading")
+ if identifier == "read":
+ return _("Read")
+ return shelf["name"] if isinstance(shelf, dict) else shelf.name
+
+
@register.simple_tag(takes_context=True)
def active_shelf(context, book):
"""check what shelf a user has a book on, if any"""
| {"golden_diff": "diff --git a/bookwyrm/templatetags/shelf_tags.py b/bookwyrm/templatetags/shelf_tags.py\n--- a/bookwyrm/templatetags/shelf_tags.py\n+++ b/bookwyrm/templatetags/shelf_tags.py\n@@ -1,5 +1,6 @@\n \"\"\" Filters and tags related to shelving books \"\"\"\n from django import template\n+from django.utils.translation import gettext_lazy as _\n \n from bookwyrm import models\n from bookwyrm.utils import cache\n@@ -32,6 +33,24 @@\n return \"to-read\"\n \n \[email protected](name=\"translate_shelf_name\")\n+def get_translated_shelf_name(shelf):\n+ \"\"\"produced translated shelf nidentifierame\"\"\"\n+ if not shelf:\n+ return \"\"\n+ # support obj or dict\n+ identifier = shelf[\"identifier\"] if isinstance(shelf, dict) else shelf.identifier\n+ if identifier == \"all\":\n+ return _(\"All books\")\n+ if identifier == \"to-read\":\n+ return _(\"To Read\")\n+ if identifier == \"reading\":\n+ return _(\"Currently Reading\")\n+ if identifier == \"read\":\n+ return _(\"Read\")\n+ return shelf[\"name\"] if isinstance(shelf, dict) else shelf.name\n+\n+\n @register.simple_tag(takes_context=True)\n def active_shelf(context, book):\n \"\"\"check what shelf a user has a book on, if any\"\"\"\n", "issue": "localisation string for a shelf is missing\n**Describe the bug**\r\nA translation exists, but the string \"Currently reading\"\u00a0is shown in English language\r\n\r\n**To Reproduce**\r\nSwitch language to non-english and check book status\r\n\r\n**Expected behavior**\r\nTranslated string used instead of English\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Instance**\r\nhttps://ziurkes.group.lt\r\n\r\n\n", "before_files": [{"content": "\"\"\" Filters and tags related to shelving books \"\"\"\nfrom django import template\n\nfrom bookwyrm import models\nfrom bookwyrm.utils import cache\n\n\nregister = template.Library()\n\n\[email protected](name=\"is_book_on_shelf\")\ndef get_is_book_on_shelf(book, shelf):\n \"\"\"is a book on a shelf\"\"\"\n return cache.get_or_set(\n f\"book-on-shelf-{book.id}-{shelf.id}\",\n lambda b, s: s.books.filter(id=b.id).exists(),\n book,\n shelf,\n timeout=15552000,\n )\n\n\[email protected](name=\"next_shelf\")\ndef get_next_shelf(current_shelf):\n \"\"\"shelf you'd use to update reading progress\"\"\"\n if current_shelf == \"to-read\":\n return \"reading\"\n if current_shelf == \"reading\":\n return \"read\"\n if current_shelf == \"read\":\n return \"complete\"\n return \"to-read\"\n\n\[email protected]_tag(takes_context=True)\ndef active_shelf(context, book):\n \"\"\"check what shelf a user has a book on, if any\"\"\"\n user = context[\"request\"].user\n return cache.get_or_set(\n f\"active_shelf-{user.id}-{book.id}\",\n lambda u, b: (\n models.ShelfBook.objects.filter(\n shelf__user=u,\n book__parent_work__editions=b,\n ).first()\n or False\n ),\n user,\n book,\n timeout=15552000,\n ) or {\"book\": book}\n\n\[email protected]_tag(takes_context=False)\ndef latest_read_through(book, user):\n \"\"\"the most recent read activity\"\"\"\n return cache.get_or_set(\n f\"latest_read_through-{user.id}-{book.id}\",\n lambda u, b: (\n models.ReadThrough.objects.filter(user=u, book=b, is_active=True)\n .order_by(\"-start_date\")\n .first()\n or False\n ),\n user,\n book,\n timeout=15552000,\n )\n", "path": "bookwyrm/templatetags/shelf_tags.py"}], "after_files": [{"content": "\"\"\" Filters and tags related to shelving books \"\"\"\nfrom django import template\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\nfrom bookwyrm.utils import 
cache\n\n\nregister = template.Library()\n\n\[email protected](name=\"is_book_on_shelf\")\ndef get_is_book_on_shelf(book, shelf):\n \"\"\"is a book on a shelf\"\"\"\n return cache.get_or_set(\n f\"book-on-shelf-{book.id}-{shelf.id}\",\n lambda b, s: s.books.filter(id=b.id).exists(),\n book,\n shelf,\n timeout=15552000,\n )\n\n\[email protected](name=\"next_shelf\")\ndef get_next_shelf(current_shelf):\n \"\"\"shelf you'd use to update reading progress\"\"\"\n if current_shelf == \"to-read\":\n return \"reading\"\n if current_shelf == \"reading\":\n return \"read\"\n if current_shelf == \"read\":\n return \"complete\"\n return \"to-read\"\n\n\[email protected](name=\"translate_shelf_name\")\ndef get_translated_shelf_name(shelf):\n \"\"\"produced translated shelf nidentifierame\"\"\"\n if not shelf:\n return \"\"\n # support obj or dict\n identifier = shelf[\"identifier\"] if isinstance(shelf, dict) else shelf.identifier\n if identifier == \"all\":\n return _(\"All books\")\n if identifier == \"to-read\":\n return _(\"To Read\")\n if identifier == \"reading\":\n return _(\"Currently Reading\")\n if identifier == \"read\":\n return _(\"Read\")\n return shelf[\"name\"] if isinstance(shelf, dict) else shelf.name\n\n\[email protected]_tag(takes_context=True)\ndef active_shelf(context, book):\n \"\"\"check what shelf a user has a book on, if any\"\"\"\n user = context[\"request\"].user\n return cache.get_or_set(\n f\"active_shelf-{user.id}-{book.id}\",\n lambda u, b: (\n models.ShelfBook.objects.filter(\n shelf__user=u,\n book__parent_work__editions=b,\n ).first()\n or False\n ),\n user,\n book,\n timeout=15552000,\n ) or {\"book\": book}\n\n\[email protected]_tag(takes_context=False)\ndef latest_read_through(book, user):\n \"\"\"the most recent read activity\"\"\"\n return cache.get_or_set(\n f\"latest_read_through-{user.id}-{book.id}\",\n lambda u, b: (\n models.ReadThrough.objects.filter(user=u, book=b, is_active=True)\n .order_by(\"-start_date\")\n .first()\n or False\n ),\n user,\n book,\n timeout=15552000,\n )\n", "path": "bookwyrm/templatetags/shelf_tags.py"}]} | 978 | 313 |
gh_patches_debug_21542 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-202 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Too many false positives when detecting wake word
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/client/speech/local_recognizer.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import time
20
21 import os
22 from pocketsphinx.pocketsphinx import Decoder
23
24 __author__ = 'seanfitz, jdorleans'
25
26 BASEDIR = os.path.dirname(os.path.abspath(__file__))
27
28
29 class LocalRecognizer(object):
30 def __init__(self, sample_rate=16000, lang="en-us", key_phrase="mycroft"):
31 self.lang = lang
32 self.key_phrase = key_phrase
33 self.sample_rate = sample_rate
34 self.configure()
35
36 def configure(self):
37 config = Decoder.default_config()
38 config.set_string('-hmm', os.path.join(BASEDIR, 'model', self.lang,
39 'hmm'))
40 config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,
41 'mycroft-en-us.dict'))
42 config.set_string('-keyphrase', self.key_phrase)
43 config.set_float('-kws_threshold', float('1e-45'))
44 config.set_float('-samprate', self.sample_rate)
45 config.set_int('-nfft', 2048)
46 config.set_string('-logfn', '/dev/null')
47 self.decoder = Decoder(config)
48
49 def transcribe(self, byte_data, metrics=None):
50 start = time.time()
51 self.decoder.start_utt()
52 self.decoder.process_raw(byte_data, False, False)
53 self.decoder.end_utt()
54 if metrics:
55 metrics.timer("mycroft.stt.local.time_s", time.time() - start)
56 return self.decoder.hyp()
57
58 def is_recognized(self, byte_data, metrics):
59 hyp = self.transcribe(byte_data, metrics)
60 return hyp and self.key_phrase in hyp.hypstr.lower()
61
62 def found_wake_word(self, hypothesis):
63 return hypothesis and self.key_phrase in hypothesis.hypstr.lower()
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/client/speech/local_recognizer.py b/mycroft/client/speech/local_recognizer.py
--- a/mycroft/client/speech/local_recognizer.py
+++ b/mycroft/client/speech/local_recognizer.py
@@ -27,7 +27,8 @@
class LocalRecognizer(object):
- def __init__(self, sample_rate=16000, lang="en-us", key_phrase="mycroft"):
+ def __init__(self, sample_rate=16000, lang="en-us",
+ key_phrase="hey mycroft"):
self.lang = lang
self.key_phrase = key_phrase
self.sample_rate = sample_rate
@@ -40,7 +41,7 @@
config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,
'mycroft-en-us.dict'))
config.set_string('-keyphrase', self.key_phrase)
- config.set_float('-kws_threshold', float('1e-45'))
+ config.set_float('-kws_threshold', float('1e-90'))
config.set_float('-samprate', self.sample_rate)
config.set_int('-nfft', 2048)
config.set_string('-logfn', '/dev/null')
| {"golden_diff": "diff --git a/mycroft/client/speech/local_recognizer.py b/mycroft/client/speech/local_recognizer.py\n--- a/mycroft/client/speech/local_recognizer.py\n+++ b/mycroft/client/speech/local_recognizer.py\n@@ -27,7 +27,8 @@\n \n \n class LocalRecognizer(object):\n- def __init__(self, sample_rate=16000, lang=\"en-us\", key_phrase=\"mycroft\"):\n+ def __init__(self, sample_rate=16000, lang=\"en-us\",\n+ key_phrase=\"hey mycroft\"):\n self.lang = lang\n self.key_phrase = key_phrase\n self.sample_rate = sample_rate\n@@ -40,7 +41,7 @@\n config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,\n 'mycroft-en-us.dict'))\n config.set_string('-keyphrase', self.key_phrase)\n- config.set_float('-kws_threshold', float('1e-45'))\n+ config.set_float('-kws_threshold', float('1e-90'))\n config.set_float('-samprate', self.sample_rate)\n config.set_int('-nfft', 2048)\n config.set_string('-logfn', '/dev/null')\n", "issue": "Too many false positives when detecting wake word\n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport time\n\nimport os\nfrom pocketsphinx.pocketsphinx import Decoder\n\n__author__ = 'seanfitz, jdorleans'\n\nBASEDIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass LocalRecognizer(object):\n def __init__(self, sample_rate=16000, lang=\"en-us\", key_phrase=\"mycroft\"):\n self.lang = lang\n self.key_phrase = key_phrase\n self.sample_rate = sample_rate\n self.configure()\n\n def configure(self):\n config = Decoder.default_config()\n config.set_string('-hmm', os.path.join(BASEDIR, 'model', self.lang,\n 'hmm'))\n config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,\n 'mycroft-en-us.dict'))\n config.set_string('-keyphrase', self.key_phrase)\n config.set_float('-kws_threshold', float('1e-45'))\n config.set_float('-samprate', self.sample_rate)\n config.set_int('-nfft', 2048)\n config.set_string('-logfn', '/dev/null')\n self.decoder = Decoder(config)\n\n def transcribe(self, byte_data, metrics=None):\n start = time.time()\n self.decoder.start_utt()\n self.decoder.process_raw(byte_data, False, False)\n self.decoder.end_utt()\n if metrics:\n metrics.timer(\"mycroft.stt.local.time_s\", time.time() - start)\n return self.decoder.hyp()\n\n def is_recognized(self, byte_data, metrics):\n hyp = self.transcribe(byte_data, metrics)\n return hyp and self.key_phrase in hyp.hypstr.lower()\n\n def found_wake_word(self, hypothesis):\n return hypothesis and self.key_phrase in hypothesis.hypstr.lower()\n", "path": "mycroft/client/speech/local_recognizer.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the 
License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport time\n\nimport os\nfrom pocketsphinx.pocketsphinx import Decoder\n\n__author__ = 'seanfitz, jdorleans'\n\nBASEDIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass LocalRecognizer(object):\n def __init__(self, sample_rate=16000, lang=\"en-us\",\n key_phrase=\"hey mycroft\"):\n self.lang = lang\n self.key_phrase = key_phrase\n self.sample_rate = sample_rate\n self.configure()\n\n def configure(self):\n config = Decoder.default_config()\n config.set_string('-hmm', os.path.join(BASEDIR, 'model', self.lang,\n 'hmm'))\n config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,\n 'mycroft-en-us.dict'))\n config.set_string('-keyphrase', self.key_phrase)\n config.set_float('-kws_threshold', float('1e-90'))\n config.set_float('-samprate', self.sample_rate)\n config.set_int('-nfft', 2048)\n config.set_string('-logfn', '/dev/null')\n self.decoder = Decoder(config)\n\n def transcribe(self, byte_data, metrics=None):\n start = time.time()\n self.decoder.start_utt()\n self.decoder.process_raw(byte_data, False, False)\n self.decoder.end_utt()\n if metrics:\n metrics.timer(\"mycroft.stt.local.time_s\", time.time() - start)\n return self.decoder.hyp()\n\n def is_recognized(self, byte_data, metrics):\n hyp = self.transcribe(byte_data, metrics)\n return hyp and self.key_phrase in hyp.hypstr.lower()\n\n def found_wake_word(self, hypothesis):\n return hypothesis and self.key_phrase in hypothesis.hypstr.lower()\n", "path": "mycroft/client/speech/local_recognizer.py"}]} | 944 | 277 |
gh_patches_debug_38061 | rasdani/github-patches | git_diff | onnx__onnx-5693 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature request] Expose lexical scope context in Python checker
### System information
Latest
### What is the problem that this feature solves?
Currently lexical scope context is not exposed in Python onnx.checker.
### Alternatives considered
_No response_
### Describe the feature
Follow up of https://github.com/onnx/onnx/pull/4720. Expose lexical scope context in Python onnx.checker. See https://github.com/onnx/onnx/blob/3747442528c820ab8dd41111ef3e9ab1a4da6062/onnx/cpp2py_export.cc#L378
### Will this influence the current api (Y/N)?
Y. Extended parameters will be added.
### Feature Area
checker
### Are you willing to contribute it (Y/N)
Yes
### Notes
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onnx/checker.py`
Content:
```
1 # Copyright (c) ONNX Project Contributors
2 #
3 # SPDX-License-Identifier: Apache-2.0
4 """Graph utilities for checking whether an ONNX proto message is legal."""
5
6 from __future__ import annotations
7
8 __all__ = [
9 "check_attribute",
10 "check_function",
11 "check_graph",
12 "check_model",
13 "check_node",
14 "check_sparse_tensor",
15 "check_tensor",
16 "check_value_info",
17 "DEFAULT_CONTEXT",
18 "ValidationError",
19 "C",
20 "MAXIMUM_PROTOBUF",
21 ]
22
23 import os
24 import sys
25 from typing import Any, Callable, TypeVar
26
27 from google.protobuf.message import Message
28
29 import onnx.defs
30 import onnx.onnx_cpp2py_export.checker as C # noqa: N812
31 import onnx.shape_inference
32 from onnx import (
33 IR_VERSION,
34 AttributeProto,
35 FunctionProto,
36 GraphProto,
37 ModelProto,
38 NodeProto,
39 SparseTensorProto,
40 TensorProto,
41 ValueInfoProto,
42 helper,
43 )
44
45 # Limitation of single protobuf file is 2GB
46 MAXIMUM_PROTOBUF = 2000000000
47
48 # TODO: This thing where we reserialize the protobuf back into the
49 # string, only to deserialize it at the call site, is really goofy.
50 # Stop doing that.
51
52
53 # NB: Please don't edit this context!
54 DEFAULT_CONTEXT = C.CheckerContext()
55 DEFAULT_CONTEXT.ir_version = IR_VERSION
56 # TODO: Maybe ONNX-ML should also be defaulted?
57 DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()}
58
59
60 FuncType = TypeVar("FuncType", bound=Callable[..., Any])
61
62
63 def _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:
64 if not isinstance(proto, proto_type):
65 raise TypeError(
66 f"The proto message needs to be of type '{proto_type.__name__}'"
67 )
68
69
70 def check_value_info(
71 value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
72 ) -> None:
73 _ensure_proto_type(value_info, ValueInfoProto)
74 return C.check_value_info(value_info.SerializeToString(), ctx)
75
76
77 def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
78 _ensure_proto_type(tensor, TensorProto)
79 return C.check_tensor(tensor.SerializeToString(), ctx)
80
81
82 def check_attribute(
83 attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
84 ) -> None:
85 _ensure_proto_type(attr, AttributeProto)
86 return C.check_attribute(attr.SerializeToString(), ctx)
87
88
89 def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
90 _ensure_proto_type(node, NodeProto)
91 return C.check_node(node.SerializeToString(), ctx)
92
93
94 def check_function(
95 function: FunctionProto, ctx: C.CheckerContext | None = None
96 ) -> None:
97 _ensure_proto_type(function, FunctionProto)
98 if ctx is None:
99 ctx = C.CheckerContext()
100 ctx.ir_version = helper.find_min_ir_version_for(
101 list(function.opset_import), True
102 )
103 function_opset_dic = {}
104 for domain_version in function.opset_import:
105 function_opset_dic[domain_version.domain] = domain_version.version
106 ctx.opset_imports = function_opset_dic
107 C.check_function(function.SerializeToString(), ctx)
108
109
110 def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
111 _ensure_proto_type(graph, GraphProto)
112 return C.check_graph(graph.SerializeToString(), ctx)
113
114
115 def check_sparse_tensor(
116 sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
117 ) -> None:
118 _ensure_proto_type(sparse, SparseTensorProto)
119 C.check_sparse_tensor(sparse.SerializeToString(), ctx)
120
121
122 def check_model(
123 model: ModelProto | str | bytes | os.PathLike,
124 full_check: bool = False,
125 skip_opset_compatibility_check: bool = False,
126 ) -> None:
127 """Check the consistency of a model.
128
129 An exception will be raised if the model's ir_version is not set
130 properly or is higher than checker's ir_version, or if the model
131 has duplicate keys in metadata_props.
132
133 If IR version >= 3, the model must specify opset_import.
134 If IR version < 3, the model cannot have any opset_import specified.
135
136 Args:
137 model: Model to check. If model is a path, the function checks model
138 path first. If the model bytes size is larger than 2GB, function
139 should be called using model path.
140 full_check: If True, the function also runs shape inference check.
141 skip_opset_compatibility_check: If True, the function skips the check for
142 opset compatibility.
143 """
144 # If model is a path instead of ModelProto
145 if isinstance(model, (str, os.PathLike)):
146 C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)
147 else:
148 protobuf_string = (
149 model if isinstance(model, bytes) else model.SerializeToString()
150 )
151 # If the protobuf is larger than 2GB,
152 # remind users should use the model path to check
153 if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
154 raise ValueError(
155 "This protobuf of onnx model is too large (>2GB). Call check_model with model path instead."
156 )
157 C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
158
159
160 ValidationError = C.ValidationError
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onnx/checker.py b/onnx/checker.py
--- a/onnx/checker.py
+++ b/onnx/checker.py
@@ -15,6 +15,7 @@
"check_tensor",
"check_value_info",
"DEFAULT_CONTEXT",
+ "LEXICAL_SCOPE_CONTEXT",
"ValidationError",
"C",
"MAXIMUM_PROTOBUF",
@@ -39,7 +40,6 @@
SparseTensorProto,
TensorProto,
ValueInfoProto,
- helper,
)
# Limitation of single protobuf file is 2GB
@@ -56,6 +56,8 @@
# TODO: Maybe ONNX-ML should also be defaulted?
DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()}
+LEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext()
+
FuncType = TypeVar("FuncType", bound=Callable[..., Any])
@@ -80,36 +82,39 @@
def check_attribute(
- attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
+ attr: AttributeProto,
+ ctx: C.CheckerContext = DEFAULT_CONTEXT,
+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
) -> None:
_ensure_proto_type(attr, AttributeProto)
- return C.check_attribute(attr.SerializeToString(), ctx)
+ return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx)
-def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
+def check_node(
+ node: NodeProto,
+ ctx: C.CheckerContext = DEFAULT_CONTEXT,
+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+) -> None:
_ensure_proto_type(node, NodeProto)
- return C.check_node(node.SerializeToString(), ctx)
+ return C.check_node(node.SerializeToString(), ctx, lex_ctx)
def check_function(
- function: FunctionProto, ctx: C.CheckerContext | None = None
+ function: FunctionProto,
+ ctx: C.CheckerContext,
+ lex_ctx: C.LexicalScopeContext,
) -> None:
_ensure_proto_type(function, FunctionProto)
- if ctx is None:
- ctx = C.CheckerContext()
- ctx.ir_version = helper.find_min_ir_version_for(
- list(function.opset_import), True
- )
- function_opset_dic = {}
- for domain_version in function.opset_import:
- function_opset_dic[domain_version.domain] = domain_version.version
- ctx.opset_imports = function_opset_dic
- C.check_function(function.SerializeToString(), ctx)
+ C.check_function(function.SerializeToString(), ctx, lex_ctx)
-def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
+def check_graph(
+ graph: GraphProto,
+ ctx: C.CheckerContext = DEFAULT_CONTEXT,
+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+) -> None:
_ensure_proto_type(graph, GraphProto)
- return C.check_graph(graph.SerializeToString(), ctx)
+ return C.check_graph(graph.SerializeToString(), ctx, lex_ctx)
def check_sparse_tensor(
| {"golden_diff": "diff --git a/onnx/checker.py b/onnx/checker.py\n--- a/onnx/checker.py\n+++ b/onnx/checker.py\n@@ -15,6 +15,7 @@\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n+ \"LEXICAL_SCOPE_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n@@ -39,7 +40,6 @@\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n- helper,\n )\n \n # Limitation of single protobuf file is 2GB\n@@ -56,6 +56,8 @@\n # TODO: Maybe ONNX-ML should also be defaulted?\n DEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n \n+LEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext()\n+\n \n FuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n \n@@ -80,36 +82,39 @@\n \n \n def check_attribute(\n- attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n+ attr: AttributeProto,\n+ ctx: C.CheckerContext = DEFAULT_CONTEXT,\n+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n ) -> None:\n _ensure_proto_type(attr, AttributeProto)\n- return C.check_attribute(attr.SerializeToString(), ctx)\n+ return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx)\n \n \n-def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n+def check_node(\n+ node: NodeProto,\n+ ctx: C.CheckerContext = DEFAULT_CONTEXT,\n+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n+) -> None:\n _ensure_proto_type(node, NodeProto)\n- return C.check_node(node.SerializeToString(), ctx)\n+ return C.check_node(node.SerializeToString(), ctx, lex_ctx)\n \n \n def check_function(\n- function: FunctionProto, ctx: C.CheckerContext | None = None\n+ function: FunctionProto,\n+ ctx: C.CheckerContext,\n+ lex_ctx: C.LexicalScopeContext,\n ) -> None:\n _ensure_proto_type(function, FunctionProto)\n- if ctx is None:\n- ctx = C.CheckerContext()\n- ctx.ir_version = helper.find_min_ir_version_for(\n- list(function.opset_import), True\n- )\n- function_opset_dic = {}\n- for domain_version in function.opset_import:\n- function_opset_dic[domain_version.domain] = domain_version.version\n- ctx.opset_imports = function_opset_dic\n- C.check_function(function.SerializeToString(), ctx)\n+ C.check_function(function.SerializeToString(), ctx, lex_ctx)\n \n \n-def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n+def check_graph(\n+ graph: GraphProto,\n+ ctx: C.CheckerContext = DEFAULT_CONTEXT,\n+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n+) -> None:\n _ensure_proto_type(graph, GraphProto)\n- return C.check_graph(graph.SerializeToString(), ctx)\n+ return C.check_graph(graph.SerializeToString(), ctx, lex_ctx)\n \n \n def check_sparse_tensor(\n", "issue": "[Feature request] Expose lexical scope context in Python checker\n### System information\n\nLatest\n\n### What is the problem that this feature solves?\n\nCurrently lexical scope context is not exposed in Python onnx.checker.\n\n### Alternatives considered\n\n_No response_\n\n### Describe the feature\n\nFollow up of https://github.com/onnx/onnx/pull/4720. Expose lexical scope context in Python onnx.checker. See https://github.com/onnx/onnx/blob/3747442528c820ab8dd41111ef3e9ab1a4da6062/onnx/cpp2py_export.cc#L378\n\n### Will this influence the current api (Y/N)?\n\nY. 
Extended parameters will be added.\n\n### Feature Area\n\nchecker\n\n### Are you willing to contribute it (Y/N)\n\nYes\n\n### Notes\n\n_No response_\n", "before_files": [{"content": "# Copyright (c) ONNX Project Contributors\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Graph utilities for checking whether an ONNX proto message is legal.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"check_attribute\",\n \"check_function\",\n \"check_graph\",\n \"check_model\",\n \"check_node\",\n \"check_sparse_tensor\",\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n]\n\nimport os\nimport sys\nfrom typing import Any, Callable, TypeVar\n\nfrom google.protobuf.message import Message\n\nimport onnx.defs\nimport onnx.onnx_cpp2py_export.checker as C # noqa: N812\nimport onnx.shape_inference\nfrom onnx import (\n IR_VERSION,\n AttributeProto,\n FunctionProto,\n GraphProto,\n ModelProto,\n NodeProto,\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n helper,\n)\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n\n\ndef _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:\n if not isinstance(proto, proto_type):\n raise TypeError(\n f\"The proto message needs to be of type '{proto_type.__name__}'\"\n )\n\n\ndef check_value_info(\n value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(value_info, ValueInfoProto)\n return C.check_value_info(value_info.SerializeToString(), ctx)\n\n\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(tensor, TensorProto)\n return C.check_tensor(tensor.SerializeToString(), ctx)\n\n\ndef check_attribute(\n attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(attr, AttributeProto)\n return C.check_attribute(attr.SerializeToString(), ctx)\n\n\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(node, NodeProto)\n return C.check_node(node.SerializeToString(), ctx)\n\n\ndef check_function(\n function: FunctionProto, ctx: C.CheckerContext | None = None\n) -> None:\n _ensure_proto_type(function, FunctionProto)\n if ctx is None:\n ctx = C.CheckerContext()\n ctx.ir_version = helper.find_min_ir_version_for(\n list(function.opset_import), True\n )\n function_opset_dic = {}\n for domain_version in function.opset_import:\n function_opset_dic[domain_version.domain] = domain_version.version\n ctx.opset_imports = function_opset_dic\n C.check_function(function.SerializeToString(), ctx)\n\n\ndef check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(graph, GraphProto)\n return C.check_graph(graph.SerializeToString(), ctx)\n\n\ndef check_sparse_tensor(\n sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(sparse, SparseTensorProto)\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(\n 
model: ModelProto | str | bytes | os.PathLike,\n full_check: bool = False,\n skip_opset_compatibility_check: bool = False,\n) -> None:\n \"\"\"Check the consistency of a model.\n\n An exception will be raised if the model's ir_version is not set\n properly or is higher than checker's ir_version, or if the model\n has duplicate keys in metadata_props.\n\n If IR version >= 3, the model must specify opset_import.\n If IR version < 3, the model cannot have any opset_import specified.\n\n Args:\n model: Model to check. If model is a path, the function checks model\n path first. If the model bytes size is larger than 2GB, function\n should be called using model path.\n full_check: If True, the function also runs shape inference check.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, (str, os.PathLike)):\n C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)\n else:\n protobuf_string = (\n model if isinstance(model, bytes) else model.SerializeToString()\n )\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError(\n \"This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.\"\n )\n C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}], "after_files": [{"content": "# Copyright (c) ONNX Project Contributors\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Graph utilities for checking whether an ONNX proto message is legal.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"check_attribute\",\n \"check_function\",\n \"check_graph\",\n \"check_model\",\n \"check_node\",\n \"check_sparse_tensor\",\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n \"LEXICAL_SCOPE_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n]\n\nimport os\nimport sys\nfrom typing import Any, Callable, TypeVar\n\nfrom google.protobuf.message import Message\n\nimport onnx.defs\nimport onnx.onnx_cpp2py_export.checker as C # noqa: N812\nimport onnx.shape_inference\nfrom onnx import (\n IR_VERSION,\n AttributeProto,\n FunctionProto,\n GraphProto,\n ModelProto,\n NodeProto,\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n)\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n\nLEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext()\n\n\nFuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n\n\ndef _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:\n if not isinstance(proto, proto_type):\n raise TypeError(\n f\"The proto message needs to be of type '{proto_type.__name__}'\"\n )\n\n\ndef check_value_info(\n value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(value_info, ValueInfoProto)\n return C.check_value_info(value_info.SerializeToString(), ctx)\n\n\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext 
= DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(tensor, TensorProto)\n return C.check_tensor(tensor.SerializeToString(), ctx)\n\n\ndef check_attribute(\n attr: AttributeProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n) -> None:\n _ensure_proto_type(attr, AttributeProto)\n return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx)\n\n\ndef check_node(\n node: NodeProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n) -> None:\n _ensure_proto_type(node, NodeProto)\n return C.check_node(node.SerializeToString(), ctx, lex_ctx)\n\n\ndef check_function(\n function: FunctionProto,\n ctx: C.CheckerContext,\n lex_ctx: C.LexicalScopeContext,\n) -> None:\n _ensure_proto_type(function, FunctionProto)\n C.check_function(function.SerializeToString(), ctx, lex_ctx)\n\n\ndef check_graph(\n graph: GraphProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n) -> None:\n _ensure_proto_type(graph, GraphProto)\n return C.check_graph(graph.SerializeToString(), ctx, lex_ctx)\n\n\ndef check_sparse_tensor(\n sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(sparse, SparseTensorProto)\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(\n model: ModelProto | str | bytes | os.PathLike,\n full_check: bool = False,\n skip_opset_compatibility_check: bool = False,\n) -> None:\n \"\"\"Check the consistency of a model.\n\n An exception will be raised if the model's ir_version is not set\n properly or is higher than checker's ir_version, or if the model\n has duplicate keys in metadata_props.\n\n If IR version >= 3, the model must specify opset_import.\n If IR version < 3, the model cannot have any opset_import specified.\n\n Args:\n model: Model to check. If model is a path, the function checks model\n path first. If the model bytes size is larger than 2GB, function\n should be called using model path.\n full_check: If True, the function also runs shape inference check.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, (str, os.PathLike)):\n C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)\n else:\n protobuf_string = (\n model if isinstance(model, bytes) else model.SerializeToString()\n )\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError(\n \"This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.\"\n )\n C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}]} | 2,034 | 713 |
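As an illustration for the checker row above, here is a hedged sketch of how the extended signatures from the golden diff could be exercised. It assumes a build of onnx that already contains the patch; the extra `lex_ctx` argument and the `LEXICAL_SCOPE_CONTEXT` default do not exist in unpatched releases.

```python
import onnx.checker as checker
from onnx import TensorProto, helper

node = helper.make_node("Relu", inputs=["x"], outputs=["y"])
graph = helper.make_graph(
    [node],
    "demo",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [1])],
)

# With the patch applied, callers may pass an explicit lexical scope context;
# LEXICAL_SCOPE_CONTEXT is the module-level default the diff introduces.
lex_ctx = checker.LEXICAL_SCOPE_CONTEXT
checker.check_node(node, checker.DEFAULT_CONTEXT, lex_ctx)
checker.check_graph(graph, checker.DEFAULT_CONTEXT, lex_ctx)
```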
gh_patches_debug_27826 | rasdani/github-patches | git_diff | qtile__qtile-3863 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bluetooth widget displays adapter name instead of name of connected device
### The issue:
version: 0.21.0
log: no relevant log
I configured the bluetooth-widget.
When a device is connected, it shows the adapter name instead of the device name.
### Required:
- [X] I have searched past issues to see if this bug has already been reported.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/bluetooth.py`
Content:
```
1 # Copyright (c) 2021 Graeme Holliday
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 from dbus_next.aio import MessageBus
22 from dbus_next.constants import BusType
23
24 from libqtile.widget import base
25
26 BLUEZ = "org.bluez"
27 BLUEZ_PATH = "/org/bluez/hci0"
28 BLUEZ_ADAPTER = "org.bluez.Adapter1"
29 BLUEZ_DEVICE = "org.bluez.Device1"
30 BLUEZ_PROPERTIES = "org.freedesktop.DBus.Properties"
31
32
33 class Bluetooth(base._TextBox):
34 """
35 Displays bluetooth status for a particular connected device.
36
37 (For example your bluetooth headphones.)
38
39 Uses dbus-next to communicate with the system bus.
40
41 Widget requirements: dbus-next_.
42
43 .. _dbus-next: https://pypi.org/project/dbus-next/
44 """
45
46 defaults = [
47 (
48 "hci",
49 "/dev_XX_XX_XX_XX_XX_XX",
50 "hci0 device path, can be found with d-feet or similar dbus explorer.",
51 )
52 ]
53
54 def __init__(self, **config):
55 base._TextBox.__init__(self, "", **config)
56 self.add_defaults(Bluetooth.defaults)
57
58 async def _config_async(self):
59 # set initial values
60 self.powered = await self._init_adapter()
61 self.connected, self.device = await self._init_device()
62
63 self.update_text()
64
65 async def _init_adapter(self):
66 # set up interface to adapter properties using high-level api
67 bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
68 introspect = await bus.introspect(BLUEZ, BLUEZ_PATH)
69 obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH, introspect)
70 iface = obj.get_interface(BLUEZ_ADAPTER)
71 props = obj.get_interface(BLUEZ_PROPERTIES)
72
73 powered = await iface.get_powered()
74 # subscribe receiver to property changed
75 props.on_properties_changed(self._signal_received)
76 return powered
77
78 async def _init_device(self):
79 # set up interface to device properties using high-level api
80 bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
81 introspect = await bus.introspect(BLUEZ, BLUEZ_PATH + self.hci)
82 obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH + self.hci, introspect)
83 iface = obj.get_interface(BLUEZ_DEVICE)
84 props = obj.get_interface(BLUEZ_PROPERTIES)
85
86 connected = await iface.get_connected()
87 name = await iface.get_name()
88 # subscribe receiver to property changed
89 props.on_properties_changed(self._signal_received)
90 return connected, name
91
92 def _signal_received(self, interface_name, changed_properties, _invalidated_properties):
93 powered = changed_properties.get("Powered", None)
94 if powered is not None:
95 self.powered = powered.value
96 self.update_text()
97
98 connected = changed_properties.get("Connected", None)
99 if connected is not None:
100 self.connected = connected.value
101 self.update_text()
102
103 device = changed_properties.get("Name", None)
104 if device is not None:
105 self.device = device.value
106 self.update_text()
107
108 def update_text(self):
109 text = ""
110 if not self.powered:
111 text = "off"
112 else:
113 if not self.connected:
114 text = "on"
115 else:
116 text = self.device
117 self.update(text)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/widget/bluetooth.py b/libqtile/widget/bluetooth.py
--- a/libqtile/widget/bluetooth.py
+++ b/libqtile/widget/bluetooth.py
@@ -72,7 +72,7 @@
powered = await iface.get_powered()
# subscribe receiver to property changed
- props.on_properties_changed(self._signal_received)
+ props.on_properties_changed(self._adapter_signal_received)
return powered
async def _init_device(self):
@@ -86,15 +86,20 @@
connected = await iface.get_connected()
name = await iface.get_name()
# subscribe receiver to property changed
- props.on_properties_changed(self._signal_received)
+ props.on_properties_changed(self._device_signal_received)
return connected, name
- def _signal_received(self, interface_name, changed_properties, _invalidated_properties):
+ def _adapter_signal_received(
+ self, interface_name, changed_properties, _invalidated_properties
+ ):
powered = changed_properties.get("Powered", None)
if powered is not None:
self.powered = powered.value
self.update_text()
+ def _device_signal_received(
+ self, interface_name, changed_properties, _invalidated_properties
+ ):
connected = changed_properties.get("Connected", None)
if connected is not None:
self.connected = connected.value
| {"golden_diff": "diff --git a/libqtile/widget/bluetooth.py b/libqtile/widget/bluetooth.py\n--- a/libqtile/widget/bluetooth.py\n+++ b/libqtile/widget/bluetooth.py\n@@ -72,7 +72,7 @@\n \n powered = await iface.get_powered()\n # subscribe receiver to property changed\n- props.on_properties_changed(self._signal_received)\n+ props.on_properties_changed(self._adapter_signal_received)\n return powered\n \n async def _init_device(self):\n@@ -86,15 +86,20 @@\n connected = await iface.get_connected()\n name = await iface.get_name()\n # subscribe receiver to property changed\n- props.on_properties_changed(self._signal_received)\n+ props.on_properties_changed(self._device_signal_received)\n return connected, name\n \n- def _signal_received(self, interface_name, changed_properties, _invalidated_properties):\n+ def _adapter_signal_received(\n+ self, interface_name, changed_properties, _invalidated_properties\n+ ):\n powered = changed_properties.get(\"Powered\", None)\n if powered is not None:\n self.powered = powered.value\n self.update_text()\n \n+ def _device_signal_received(\n+ self, interface_name, changed_properties, _invalidated_properties\n+ ):\n connected = changed_properties.get(\"Connected\", None)\n if connected is not None:\n self.connected = connected.value\n", "issue": "Bluetooth widget displays adapter name instead of name of connected device\n### The issue:\n\nversion: 0.21.0\r\nlog: no relevant log\r\n\r\nI configured the bluetooth-widget.\r\nWhen a device is connected, it shows the adapter name, instead of the device name.\n\n### Required:\n\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# Copyright (c) 2021 Graeme Holliday\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom dbus_next.aio import MessageBus\nfrom dbus_next.constants import BusType\n\nfrom libqtile.widget import base\n\nBLUEZ = \"org.bluez\"\nBLUEZ_PATH = \"/org/bluez/hci0\"\nBLUEZ_ADAPTER = \"org.bluez.Adapter1\"\nBLUEZ_DEVICE = \"org.bluez.Device1\"\nBLUEZ_PROPERTIES = \"org.freedesktop.DBus.Properties\"\n\n\nclass Bluetooth(base._TextBox):\n \"\"\"\n Displays bluetooth status for a particular connected device.\n\n (For example your bluetooth headphones.)\n\n Uses dbus-next to communicate with the system bus.\n\n Widget requirements: dbus-next_.\n\n .. 
_dbus-next: https://pypi.org/project/dbus-next/\n \"\"\"\n\n defaults = [\n (\n \"hci\",\n \"/dev_XX_XX_XX_XX_XX_XX\",\n \"hci0 device path, can be found with d-feet or similar dbus explorer.\",\n )\n ]\n\n def __init__(self, **config):\n base._TextBox.__init__(self, \"\", **config)\n self.add_defaults(Bluetooth.defaults)\n\n async def _config_async(self):\n # set initial values\n self.powered = await self._init_adapter()\n self.connected, self.device = await self._init_device()\n\n self.update_text()\n\n async def _init_adapter(self):\n # set up interface to adapter properties using high-level api\n bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n introspect = await bus.introspect(BLUEZ, BLUEZ_PATH)\n obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH, introspect)\n iface = obj.get_interface(BLUEZ_ADAPTER)\n props = obj.get_interface(BLUEZ_PROPERTIES)\n\n powered = await iface.get_powered()\n # subscribe receiver to property changed\n props.on_properties_changed(self._signal_received)\n return powered\n\n async def _init_device(self):\n # set up interface to device properties using high-level api\n bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n introspect = await bus.introspect(BLUEZ, BLUEZ_PATH + self.hci)\n obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH + self.hci, introspect)\n iface = obj.get_interface(BLUEZ_DEVICE)\n props = obj.get_interface(BLUEZ_PROPERTIES)\n\n connected = await iface.get_connected()\n name = await iface.get_name()\n # subscribe receiver to property changed\n props.on_properties_changed(self._signal_received)\n return connected, name\n\n def _signal_received(self, interface_name, changed_properties, _invalidated_properties):\n powered = changed_properties.get(\"Powered\", None)\n if powered is not None:\n self.powered = powered.value\n self.update_text()\n\n connected = changed_properties.get(\"Connected\", None)\n if connected is not None:\n self.connected = connected.value\n self.update_text()\n\n device = changed_properties.get(\"Name\", None)\n if device is not None:\n self.device = device.value\n self.update_text()\n\n def update_text(self):\n text = \"\"\n if not self.powered:\n text = \"off\"\n else:\n if not self.connected:\n text = \"on\"\n else:\n text = self.device\n self.update(text)\n", "path": "libqtile/widget/bluetooth.py"}], "after_files": [{"content": "# Copyright (c) 2021 Graeme Holliday\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom dbus_next.aio import MessageBus\nfrom dbus_next.constants import BusType\n\nfrom libqtile.widget import base\n\nBLUEZ = \"org.bluez\"\nBLUEZ_PATH = \"/org/bluez/hci0\"\nBLUEZ_ADAPTER = \"org.bluez.Adapter1\"\nBLUEZ_DEVICE = \"org.bluez.Device1\"\nBLUEZ_PROPERTIES = \"org.freedesktop.DBus.Properties\"\n\n\nclass Bluetooth(base._TextBox):\n \"\"\"\n Displays bluetooth status for a particular connected device.\n\n (For example your bluetooth headphones.)\n\n Uses dbus-next to communicate with the system bus.\n\n Widget requirements: dbus-next_.\n\n .. _dbus-next: https://pypi.org/project/dbus-next/\n \"\"\"\n\n defaults = [\n (\n \"hci\",\n \"/dev_XX_XX_XX_XX_XX_XX\",\n \"hci0 device path, can be found with d-feet or similar dbus explorer.\",\n )\n ]\n\n def __init__(self, **config):\n base._TextBox.__init__(self, \"\", **config)\n self.add_defaults(Bluetooth.defaults)\n\n async def _config_async(self):\n # set initial values\n self.powered = await self._init_adapter()\n self.connected, self.device = await self._init_device()\n\n self.update_text()\n\n async def _init_adapter(self):\n # set up interface to adapter properties using high-level api\n bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n introspect = await bus.introspect(BLUEZ, BLUEZ_PATH)\n obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH, introspect)\n iface = obj.get_interface(BLUEZ_ADAPTER)\n props = obj.get_interface(BLUEZ_PROPERTIES)\n\n powered = await iface.get_powered()\n # subscribe receiver to property changed\n props.on_properties_changed(self._adapter_signal_received)\n return powered\n\n async def _init_device(self):\n # set up interface to device properties using high-level api\n bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n introspect = await bus.introspect(BLUEZ, BLUEZ_PATH + self.hci)\n obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH + self.hci, introspect)\n iface = obj.get_interface(BLUEZ_DEVICE)\n props = obj.get_interface(BLUEZ_PROPERTIES)\n\n connected = await iface.get_connected()\n name = await iface.get_name()\n # subscribe receiver to property changed\n props.on_properties_changed(self._device_signal_received)\n return connected, name\n\n def _adapter_signal_received(\n self, interface_name, changed_properties, _invalidated_properties\n ):\n powered = changed_properties.get(\"Powered\", None)\n if powered is not None:\n self.powered = powered.value\n self.update_text()\n\n def _device_signal_received(\n self, interface_name, changed_properties, _invalidated_properties\n ):\n connected = changed_properties.get(\"Connected\", None)\n if connected is not None:\n self.connected = connected.value\n self.update_text()\n\n device = changed_properties.get(\"Name\", None)\n if device is not None:\n self.device = device.value\n self.update_text()\n\n def update_text(self):\n text = \"\"\n if not self.powered:\n text = \"off\"\n else:\n if not self.connected:\n text = \"on\"\n else:\n text = self.device\n self.update(text)\n", "path": "libqtile/widget/bluetooth.py"}]} | 1,541 | 300 |
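To show the shape of the qtile fix above outside the widget, here is a stripped-down sketch of the same split-callback pattern with dbus-next. The bus name and adapter path are the usual BlueZ defaults, but everything here is illustrative and needs a running system bus with BlueZ to execute.

```python
import asyncio

from dbus_next.aio import MessageBus
from dbus_next.constants import BusType

BLUEZ = "org.bluez"
ADAPTER_PATH = "/org/bluez/hci0"


def on_adapter_changed(interface, changed, invalidated):
    # Adapter-level properties (e.g. Powered) are handled here...
    if "Powered" in changed:
        print("powered:", changed["Powered"].value)


def on_device_changed(interface, changed, invalidated):
    # ...while device-level properties (Connected, Name) are handled here,
    # which is the separation the patch introduces. A second proxy object for
    # the device path (e.g. /org/bluez/hci0/dev_XX_...) would register this one.
    if "Connected" in changed:
        print("connected:", changed["Connected"].value)


async def main():
    bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
    introspection = await bus.introspect(BLUEZ, ADAPTER_PATH)
    adapter = bus.get_proxy_object(BLUEZ, ADAPTER_PATH, introspection)
    props = adapter.get_interface("org.freedesktop.DBus.Properties")
    props.on_properties_changed(on_adapter_changed)
    await asyncio.Future()  # keep listening for signals


# asyncio.run(main())
```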
gh_patches_debug_5575 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3047 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add OpenAPI Specification for /databases/ endpoint
## Problem
In order to ensure the accuracy of the specs generated by drf-spectacular for the /databases/ endpoint, we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.
## Proposed solution
* Implement a custom preprocessing hook function to filter out all endpoints except for the /databases/ endpoint. The hook function selectively retains only the endpoint paths that match /databases/.
* Configure the PREPROCESSING_HOOKS setting with the custom hook function; this ensures that only the /databases/ endpoint is considered during the OpenAPI specification generation process.
* Finally, generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings/openapi.py`
Content:
```
1 def custom_preprocessing_hook(endpoints):
2 filtered = []
3 for (path, path_regex, method, callback) in endpoints:
4 # Remove all but DRF API endpoints
5 if path.startswith("/api/db/v0/data_files/"):
6 filtered.append((path, path_regex, method, callback))
7 return filtered
8
9
10 def remove_url_prefix_hook(result, **kwargs):
11 # Remove namespace and version URL prefix from the operation Id of the generated API schema
12 for path, path_info in result['paths'].items():
13 for method, operation in path_info.items():
14 operation_id = operation.get('operationId')
15 if operation_id:
16 if path.startswith('/api/db/v0/'):
17 operation['operationId'] = operation_id.replace('db_v0_', '')
18 elif path.startswith('/api/ui/v0/'):
19 operation['operationId'] = operation_id.replace('ui_v0_', '')
20
21 return result
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -2,7 +2,7 @@
filtered = []
for (path, path_regex, method, callback) in endpoints:
# Remove all but DRF API endpoints
- if path.startswith("/api/db/v0/data_files/"):
+ if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/"):
filtered.append((path, path_regex, method, callback))
return filtered
| {"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -2,7 +2,7 @@\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/data_files/\"):\n+ if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n", "issue": "Add OpenAPI Specification for /databases/ endpoint \n## Problem\r\nIn order to ensure the accuracy of the specs generated by drf-spectacular for /databases/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.\r\n\r\n## Proposed solution\r\n* Implement custom preprocessing hook function to filter out all endpoints except for the /databases/ endpoint.The hook function selectively retains only the endpoint paths that match the /datafiles/ \r\n* Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /datafiles/ endpoint is considered during the OpenAPI specification generation process.\r\n* Finally, generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command\r\n\r\n\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}], "after_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]} | 663 | 127 |
gh_patches_debug_7158 | rasdani/github-patches | git_diff | liberapay__liberapay.com-1140 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GitLab support is broken
`{"error":"API V3 is no longer supported. Use API V4 instead."}`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/elsewhere/gitlab.py`
Content:
```
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from liberapay.elsewhere._base import PlatformOAuth2
4 from liberapay.elsewhere._extractors import key
5 from liberapay.elsewhere._paginators import header_links_paginator
6
7
8 class GitLab(PlatformOAuth2):
9
10 # Platform attributes
11 name = 'gitlab'
12 display_name = 'GitLab'
13 account_url = 'https://gitlab.com/u/{user_name}'
14 repo_url = 'https://gitlab.com/{slug}'
15 has_teams = True
16
17 # Auth attributes
18 # GitLab uses https://github.com/doorkeeper-gem/doorkeeper
19 auth_url = 'https://gitlab.com/oauth/authorize'
20 access_token_url = 'https://gitlab.com/oauth/token'
21
22 # can_auth_with_client_credentials = True
23 # https://gitlab.com/gitlab-org/gitlab-ce/issues/13795
24
25 # API attributes
26 # http://doc.gitlab.com/ce/api/
27 api_format = 'json'
28 api_paginator = header_links_paginator(total_header='X-Total')
29 api_url = 'https://gitlab.com/api/v3'
30 api_user_info_path = '/users/{user_id}'
31 api_user_name_info_path = '/users?username={user_name}'
32 api_user_self_info_path = '/user'
33 api_team_members_path = '/groups/{user_name}/members'
34 api_repos_path = '/projects?owned=true&visibility=public&order_by=last_activity_at&per_page=100'
35 api_starred_path = '/projects?starred=true&visibility=public'
36
37 # User info extractors
38 x_user_id = key('id')
39 x_user_name = key('username')
40 x_display_name = key('name')
41 x_email = key('email')
42 x_avatar_url = key('avatar_url')
43 x_description = key('bio')
44
45 # Repo info extractors
46 x_repo_id = key('id')
47 x_repo_name = key('name')
48 x_repo_slug = key('path_with_namespace')
49 x_repo_description = key('description')
50 x_repo_last_update = key('last_activity_at')
51 x_repo_is_fork = key('forked_from_project', clean=bool)
52 x_repo_stars_count = key('star_count')
53 x_repo_owner_id = key('owner', clean=lambda d: d['id'])
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liberapay/elsewhere/gitlab.py b/liberapay/elsewhere/gitlab.py
--- a/liberapay/elsewhere/gitlab.py
+++ b/liberapay/elsewhere/gitlab.py
@@ -26,7 +26,7 @@
# http://doc.gitlab.com/ce/api/
api_format = 'json'
api_paginator = header_links_paginator(total_header='X-Total')
- api_url = 'https://gitlab.com/api/v3'
+ api_url = 'https://gitlab.com/api/v4'
api_user_info_path = '/users/{user_id}'
api_user_name_info_path = '/users?username={user_name}'
api_user_self_info_path = '/user'
| {"golden_diff": "diff --git a/liberapay/elsewhere/gitlab.py b/liberapay/elsewhere/gitlab.py\n--- a/liberapay/elsewhere/gitlab.py\n+++ b/liberapay/elsewhere/gitlab.py\n@@ -26,7 +26,7 @@\n # http://doc.gitlab.com/ce/api/\n api_format = 'json'\n api_paginator = header_links_paginator(total_header='X-Total')\n- api_url = 'https://gitlab.com/api/v3'\n+ api_url = 'https://gitlab.com/api/v4'\n api_user_info_path = '/users/{user_id}'\n api_user_name_info_path = '/users?username={user_name}'\n api_user_self_info_path = '/user'\n", "issue": "GitLab support is broken\n`{\"error\":\"API V3 is no longer supported. Use API V4 instead.\"}`\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom liberapay.elsewhere._base import PlatformOAuth2\nfrom liberapay.elsewhere._extractors import key\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitLab(PlatformOAuth2):\n\n # Platform attributes\n name = 'gitlab'\n display_name = 'GitLab'\n account_url = 'https://gitlab.com/u/{user_name}'\n repo_url = 'https://gitlab.com/{slug}'\n has_teams = True\n\n # Auth attributes\n # GitLab uses https://github.com/doorkeeper-gem/doorkeeper\n auth_url = 'https://gitlab.com/oauth/authorize'\n access_token_url = 'https://gitlab.com/oauth/token'\n\n # can_auth_with_client_credentials = True\n # https://gitlab.com/gitlab-org/gitlab-ce/issues/13795\n\n # API attributes\n # http://doc.gitlab.com/ce/api/\n api_format = 'json'\n api_paginator = header_links_paginator(total_header='X-Total')\n api_url = 'https://gitlab.com/api/v3'\n api_user_info_path = '/users/{user_id}'\n api_user_name_info_path = '/users?username={user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/groups/{user_name}/members'\n api_repos_path = '/projects?owned=true&visibility=public&order_by=last_activity_at&per_page=100'\n api_starred_path = '/projects?starred=true&visibility=public'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n x_avatar_url = key('avatar_url')\n x_description = key('bio')\n\n # Repo info extractors\n x_repo_id = key('id')\n x_repo_name = key('name')\n x_repo_slug = key('path_with_namespace')\n x_repo_description = key('description')\n x_repo_last_update = key('last_activity_at')\n x_repo_is_fork = key('forked_from_project', clean=bool)\n x_repo_stars_count = key('star_count')\n x_repo_owner_id = key('owner', clean=lambda d: d['id'])\n", "path": "liberapay/elsewhere/gitlab.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom liberapay.elsewhere._base import PlatformOAuth2\nfrom liberapay.elsewhere._extractors import key\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitLab(PlatformOAuth2):\n\n # Platform attributes\n name = 'gitlab'\n display_name = 'GitLab'\n account_url = 'https://gitlab.com/u/{user_name}'\n repo_url = 'https://gitlab.com/{slug}'\n has_teams = True\n\n # Auth attributes\n # GitLab uses https://github.com/doorkeeper-gem/doorkeeper\n auth_url = 'https://gitlab.com/oauth/authorize'\n access_token_url = 'https://gitlab.com/oauth/token'\n\n # can_auth_with_client_credentials = True\n # https://gitlab.com/gitlab-org/gitlab-ce/issues/13795\n\n # API attributes\n # http://doc.gitlab.com/ce/api/\n api_format = 'json'\n api_paginator = header_links_paginator(total_header='X-Total')\n api_url = 
'https://gitlab.com/api/v4'\n api_user_info_path = '/users/{user_id}'\n api_user_name_info_path = '/users?username={user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/groups/{user_name}/members'\n api_repos_path = '/projects?owned=true&visibility=public&order_by=last_activity_at&per_page=100'\n api_starred_path = '/projects?starred=true&visibility=public'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n x_avatar_url = key('avatar_url')\n x_description = key('bio')\n\n # Repo info extractors\n x_repo_id = key('id')\n x_repo_name = key('name')\n x_repo_slug = key('path_with_namespace')\n x_repo_description = key('description')\n x_repo_last_update = key('last_activity_at')\n x_repo_is_fork = key('forked_from_project', clean=bool)\n x_repo_stars_count = key('star_count')\n x_repo_owner_id = key('owner', clean=lambda d: d['id'])\n", "path": "liberapay/elsewhere/gitlab.py"}]} | 918 | 166 |
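To reproduce the failure mode quoted in the Liberapay issue above first-hand, here is a tiny probe script. It needs network access and the `requests` package, and the username queried is an arbitrary public GitLab account used only for illustration.

```python
import requests


def probe(base_url):
    response = requests.get(f"{base_url}/users?username=gitlab-org", timeout=10)
    return response.status_code, response.text[:80]


# The old v3 base answers with the "API V3 is no longer supported" error quoted in the
# issue, while the v4 base (the value the patch sets) returns a normal JSON user list.
print(probe("https://gitlab.com/api/v3"))
print(probe("https://gitlab.com/api/v4"))
```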
gh_patches_debug_20940 | rasdani/github-patches | git_diff | inventree__InvenTree-2984 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add unittests for auth stack
Add full coverage for https://github.com/inventree/InvenTree/pull/2976 and for the full auth stack in the middleware.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/InvenTree/middleware.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from django.conf import settings
4 from django.contrib.auth.middleware import PersistentRemoteUserMiddleware
5 from django.http import HttpResponse
6 from django.shortcuts import HttpResponseRedirect
7 from django.shortcuts import redirect
8 from django.urls import reverse_lazy, Resolver404
9 from django.urls import include, re_path
10
11 import logging
12
13 from rest_framework.authtoken.models import Token
14 from allauth_2fa.middleware import BaseRequire2FAMiddleware, AllauthTwoFactorMiddleware
15
16 from InvenTree.urls import frontendpatterns
17 from common.models import InvenTreeSetting
18
19
20 logger = logging.getLogger("inventree")
21
22
23 class AuthRequiredMiddleware(object):
24 def __init__(self, get_response):
25 self.get_response = get_response
26
27 def __call__(self, request):
28 # Code to be executed for each request before
29 # the view (and later middleware) are called.
30
31 assert hasattr(request, 'user')
32
33 # API requests are handled by the DRF library
34 if request.path_info.startswith('/api/'):
35 return self.get_response(request)
36
37 if not request.user.is_authenticated:
38 """
39 Normally, a web-based session would use csrftoken based authentication.
40 However when running an external application (e.g. the InvenTree app or Python library),
41 we must validate the user token manually.
42 """
43
44 authorized = False
45
46 # Allow static files to be accessed without auth
47 # Important for e.g. login page
48 if request.path_info.startswith('/static/'):
49 authorized = True
50
51 # Unauthorized users can access the login page
52 elif request.path_info.startswith('/accounts/'):
53 authorized = True
54
55 elif 'Authorization' in request.headers.keys() or 'authorization' in request.headers.keys():
56 auth = request.headers.get('Authorization', request.headers.get('authorization')).strip()
57
58 if auth.lower().startswith('token') and len(auth.split()) == 2:
59 token_key = auth.split()[1]
60
61 # Does the provided token match a valid user?
62 try:
63 token = Token.objects.get(key=token_key)
64
65 # Provide the user information to the request
66 request.user = token.user
67 authorized = True
68
69 except Token.DoesNotExist:
70 logger.warning(f"Access denied for unknown token {token_key}")
71
72 # No authorization was found for the request
73 if not authorized:
74 # A logout request will redirect the user to the login screen
75 if request.path_info == reverse_lazy('account_logout'):
76 return HttpResponseRedirect(reverse_lazy('account_login'))
77
78 path = request.path_info
79
80 # List of URL endpoints we *do not* want to redirect to
81 urls = [
82 reverse_lazy('account_login'),
83 reverse_lazy('account_logout'),
84 reverse_lazy('admin:login'),
85 reverse_lazy('admin:logout'),
86 ]
87
88 # Do not redirect requests to any of these paths
89 paths_ignore = [
90 '/api/',
91 '/js/',
92 '/media/',
93 '/static/',
94 ]
95
96 if path not in urls and not any([path.startswith(p) for p in paths_ignore]):
97 # Save the 'next' parameter to pass through to the login view
98
99 return redirect('{}?next={}'.format(reverse_lazy('account_login'), request.path))
100
101 else:
102 # Return a 401 (Unauthorized) response code for this request
103 return HttpResponse('Unauthorized', status=401)
104
105 response = self.get_response(request)
106
107 return response
108
109
110 url_matcher = re_path('', include(frontendpatterns))
111
112
113 class Check2FAMiddleware(BaseRequire2FAMiddleware):
114 """check if user is required to have MFA enabled"""
115 def require_2fa(self, request):
116 # Superusers are require to have 2FA.
117 try:
118 if url_matcher.resolve(request.path[1:]):
119 return InvenTreeSetting.get_setting('LOGIN_ENFORCE_MFA')
120 except Resolver404:
121 pass
122 return False
123
124
125 class CustomAllauthTwoFactorMiddleware(AllauthTwoFactorMiddleware):
126 """This function ensures only frontend code triggers the MFA auth cycle"""
127 def process_request(self, request):
128 try:
129 if not url_matcher.resolve(request.path[1:]):
130 super().process_request(request)
131 except Resolver404:
132 pass
133
134
135 class InvenTreeRemoteUserMiddleware(PersistentRemoteUserMiddleware):
136 """
137 Middleware to check if HTTP-header based auth is enabled and to set it up
138 """
139 header = settings.REMOTE_LOGIN_HEADER
140
141 def process_request(self, request):
142 if not settings.REMOTE_LOGIN:
143 return
144
145 return super().process_request(request)
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/InvenTree/InvenTree/middleware.py b/InvenTree/InvenTree/middleware.py
--- a/InvenTree/InvenTree/middleware.py
+++ b/InvenTree/InvenTree/middleware.py
@@ -3,7 +3,6 @@
from django.conf import settings
from django.contrib.auth.middleware import PersistentRemoteUserMiddleware
from django.http import HttpResponse
-from django.shortcuts import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse_lazy, Resolver404
from django.urls import include, re_path
@@ -71,10 +70,6 @@
# No authorization was found for the request
if not authorized:
- # A logout request will redirect the user to the login screen
- if request.path_info == reverse_lazy('account_logout'):
- return HttpResponseRedirect(reverse_lazy('account_login'))
-
path = request.path_info
# List of URL endpoints we *do not* want to redirect to
| {"golden_diff": "diff --git a/InvenTree/InvenTree/middleware.py b/InvenTree/InvenTree/middleware.py\n--- a/InvenTree/InvenTree/middleware.py\n+++ b/InvenTree/InvenTree/middleware.py\n@@ -3,7 +3,6 @@\n from django.conf import settings\n from django.contrib.auth.middleware import PersistentRemoteUserMiddleware\n from django.http import HttpResponse\n-from django.shortcuts import HttpResponseRedirect\n from django.shortcuts import redirect\n from django.urls import reverse_lazy, Resolver404\n from django.urls import include, re_path\n@@ -71,10 +70,6 @@\n \n # No authorization was found for the request\n if not authorized:\n- # A logout request will redirect the user to the login screen\n- if request.path_info == reverse_lazy('account_logout'):\n- return HttpResponseRedirect(reverse_lazy('account_login'))\n-\n path = request.path_info\n \n # List of URL endpoints we *do not* want to redirect to\n", "issue": "Add unittests for auth stack\nAdd full coverage for https://github.com/inventree/InvenTree/pull/2976\r\nAnd the full auth stack in the middleware\nAdd unittests for auth stack\nAdd full coverage for https://github.com/inventree/InvenTree/pull/2976\r\nAnd the full auth stack in the middleware\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.auth.middleware import PersistentRemoteUserMiddleware\nfrom django.http import HttpResponse\nfrom django.shortcuts import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy, Resolver404\nfrom django.urls import include, re_path\n\nimport logging\n\nfrom rest_framework.authtoken.models import Token\nfrom allauth_2fa.middleware import BaseRequire2FAMiddleware, AllauthTwoFactorMiddleware\n\nfrom InvenTree.urls import frontendpatterns\nfrom common.models import InvenTreeSetting\n\n\nlogger = logging.getLogger(\"inventree\")\n\n\nclass AuthRequiredMiddleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n\n assert hasattr(request, 'user')\n\n # API requests are handled by the DRF library\n if request.path_info.startswith('/api/'):\n return self.get_response(request)\n\n if not request.user.is_authenticated:\n \"\"\"\n Normally, a web-based session would use csrftoken based authentication.\n However when running an external application (e.g. the InvenTree app or Python library),\n we must validate the user token manually.\n \"\"\"\n\n authorized = False\n\n # Allow static files to be accessed without auth\n # Important for e.g. 
login page\n if request.path_info.startswith('/static/'):\n authorized = True\n\n # Unauthorized users can access the login page\n elif request.path_info.startswith('/accounts/'):\n authorized = True\n\n elif 'Authorization' in request.headers.keys() or 'authorization' in request.headers.keys():\n auth = request.headers.get('Authorization', request.headers.get('authorization')).strip()\n\n if auth.lower().startswith('token') and len(auth.split()) == 2:\n token_key = auth.split()[1]\n\n # Does the provided token match a valid user?\n try:\n token = Token.objects.get(key=token_key)\n\n # Provide the user information to the request\n request.user = token.user\n authorized = True\n\n except Token.DoesNotExist:\n logger.warning(f\"Access denied for unknown token {token_key}\")\n\n # No authorization was found for the request\n if not authorized:\n # A logout request will redirect the user to the login screen\n if request.path_info == reverse_lazy('account_logout'):\n return HttpResponseRedirect(reverse_lazy('account_login'))\n\n path = request.path_info\n\n # List of URL endpoints we *do not* want to redirect to\n urls = [\n reverse_lazy('account_login'),\n reverse_lazy('account_logout'),\n reverse_lazy('admin:login'),\n reverse_lazy('admin:logout'),\n ]\n\n # Do not redirect requests to any of these paths\n paths_ignore = [\n '/api/',\n '/js/',\n '/media/',\n '/static/',\n ]\n\n if path not in urls and not any([path.startswith(p) for p in paths_ignore]):\n # Save the 'next' parameter to pass through to the login view\n\n return redirect('{}?next={}'.format(reverse_lazy('account_login'), request.path))\n\n else:\n # Return a 401 (Unauthorized) response code for this request\n return HttpResponse('Unauthorized', status=401)\n\n response = self.get_response(request)\n\n return response\n\n\nurl_matcher = re_path('', include(frontendpatterns))\n\n\nclass Check2FAMiddleware(BaseRequire2FAMiddleware):\n \"\"\"check if user is required to have MFA enabled\"\"\"\n def require_2fa(self, request):\n # Superusers are require to have 2FA.\n try:\n if url_matcher.resolve(request.path[1:]):\n return InvenTreeSetting.get_setting('LOGIN_ENFORCE_MFA')\n except Resolver404:\n pass\n return False\n\n\nclass CustomAllauthTwoFactorMiddleware(AllauthTwoFactorMiddleware):\n \"\"\"This function ensures only frontend code triggers the MFA auth cycle\"\"\"\n def process_request(self, request):\n try:\n if not url_matcher.resolve(request.path[1:]):\n super().process_request(request)\n except Resolver404:\n pass\n\n\nclass InvenTreeRemoteUserMiddleware(PersistentRemoteUserMiddleware):\n \"\"\"\n Middleware to check if HTTP-header based auth is enabled and to set it up\n \"\"\"\n header = settings.REMOTE_LOGIN_HEADER\n\n def process_request(self, request):\n if not settings.REMOTE_LOGIN:\n return\n\n return super().process_request(request)\n", "path": "InvenTree/InvenTree/middleware.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.auth.middleware import PersistentRemoteUserMiddleware\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy, Resolver404\nfrom django.urls import include, re_path\n\nimport logging\n\nfrom rest_framework.authtoken.models import Token\nfrom allauth_2fa.middleware import BaseRequire2FAMiddleware, AllauthTwoFactorMiddleware\n\nfrom InvenTree.urls import frontendpatterns\nfrom common.models import InvenTreeSetting\n\n\nlogger = logging.getLogger(\"inventree\")\n\n\nclass 
AuthRequiredMiddleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n\n assert hasattr(request, 'user')\n\n # API requests are handled by the DRF library\n if request.path_info.startswith('/api/'):\n return self.get_response(request)\n\n if not request.user.is_authenticated:\n \"\"\"\n Normally, a web-based session would use csrftoken based authentication.\n However when running an external application (e.g. the InvenTree app or Python library),\n we must validate the user token manually.\n \"\"\"\n\n authorized = False\n\n # Allow static files to be accessed without auth\n # Important for e.g. login page\n if request.path_info.startswith('/static/'):\n authorized = True\n\n # Unauthorized users can access the login page\n elif request.path_info.startswith('/accounts/'):\n authorized = True\n\n elif 'Authorization' in request.headers.keys() or 'authorization' in request.headers.keys():\n auth = request.headers.get('Authorization', request.headers.get('authorization')).strip()\n\n if auth.lower().startswith('token') and len(auth.split()) == 2:\n token_key = auth.split()[1]\n\n # Does the provided token match a valid user?\n try:\n token = Token.objects.get(key=token_key)\n\n # Provide the user information to the request\n request.user = token.user\n authorized = True\n\n except Token.DoesNotExist:\n logger.warning(f\"Access denied for unknown token {token_key}\")\n\n # No authorization was found for the request\n if not authorized:\n path = request.path_info\n\n # List of URL endpoints we *do not* want to redirect to\n urls = [\n reverse_lazy('account_login'),\n reverse_lazy('account_logout'),\n reverse_lazy('admin:login'),\n reverse_lazy('admin:logout'),\n ]\n\n # Do not redirect requests to any of these paths\n paths_ignore = [\n '/api/',\n '/js/',\n '/media/',\n '/static/',\n ]\n\n if path not in urls and not any([path.startswith(p) for p in paths_ignore]):\n # Save the 'next' parameter to pass through to the login view\n\n return redirect('{}?next={}'.format(reverse_lazy('account_login'), request.path))\n\n else:\n # Return a 401 (Unauthorized) response code for this request\n return HttpResponse('Unauthorized', status=401)\n\n response = self.get_response(request)\n\n return response\n\n\nurl_matcher = re_path('', include(frontendpatterns))\n\n\nclass Check2FAMiddleware(BaseRequire2FAMiddleware):\n \"\"\"check if user is required to have MFA enabled\"\"\"\n def require_2fa(self, request):\n # Superusers are require to have 2FA.\n try:\n if url_matcher.resolve(request.path[1:]):\n return InvenTreeSetting.get_setting('LOGIN_ENFORCE_MFA')\n except Resolver404:\n pass\n return False\n\n\nclass CustomAllauthTwoFactorMiddleware(AllauthTwoFactorMiddleware):\n \"\"\"This function ensures only frontend code triggers the MFA auth cycle\"\"\"\n def process_request(self, request):\n try:\n if not url_matcher.resolve(request.path[1:]):\n super().process_request(request)\n except Resolver404:\n pass\n\n\nclass InvenTreeRemoteUserMiddleware(PersistentRemoteUserMiddleware):\n \"\"\"\n Middleware to check if HTTP-header based auth is enabled and to set it up\n \"\"\"\n header = settings.REMOTE_LOGIN_HEADER\n\n def process_request(self, request):\n if not settings.REMOTE_LOGIN:\n return\n\n return super().process_request(request)\n", "path": "InvenTree/InvenTree/middleware.py"}]} | 1,666 | 213 |
gh_patches_debug_1058 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-4303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Domain missing from Holland & Barrett website URLs
In the holland_and_barrett spider results, the website values returned are missing the domain, e.g. `"website": "/stores/aylesbury-3180/"`. This is what's in the code that the scraper is reading. But presumably AllThePlaces should return a fully qualified url, i.e. `https://www.hollandandbarrett.com/stores/aylesbury-3180/` in this case.
I don't know what the microdata etc. standards say about whether relative URLs are allowed, but perhaps the framework code could be modified to automatically complete a harvested relative URL against the URL of the page.
--- END ISSUE ---
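Editorial note, not part of the issue above or the patch below: the fix being asked for amounts to joining the scraped relative path against the page's own URL. A minimal standard-library sketch of that idea (the concrete values are hypothetical):
```python
from urllib.parse import urljoin

# Hypothetical page URL and scraped value, for illustration only.
page_url = "https://www.hollandandbarrett.com/stores/aylesbury-3180/"
scraped_website = "/stores/aylesbury-3180/"

# urljoin completes relative references and leaves absolute URLs untouched,
# which is the behaviour the issue asks for.
print(urljoin(page_url, scraped_website))
# https://www.hollandandbarrett.com/stores/aylesbury-3180/
```
In a Scrapy spider the same effect is normally obtained with `response.urljoin(path)`, which is what the accepted patch further down relies on.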
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/holland_and_barrett.py`
Content:
```
1 from scrapy.spiders import SitemapSpider
2
3 from locations.linked_data_parser import LinkedDataParser
4
5
6 class HollandAndBarrettSpider(SitemapSpider):
7 name = "holland_and_barrett"
8 item_attributes = {
9 "brand": "Holland & Barrett",
10 "brand_wikidata": "Q5880870",
11 }
12 sitemap_urls = [
13 "https://www.hollandandbarrett.com/sitemap-stores.xml",
14 "https://www.hollandandbarrett.nl/sitemap-stores.xml",
15 "https://www.hollandandbarrett.be/sitemap-stores.xml",
16 "https://www.hollandandbarrett.ie/sitemap-stores.xml",
17 ]
18 sitemap_rules = [("/stores/", "parse"), ("/winkels/", "parse")]
19 download_delay = 1.0
20
21 def parse(self, response):
22 yield LinkedDataParser.parse(response, "LocalBusiness")
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/holland_and_barrett.py b/locations/spiders/holland_and_barrett.py
--- a/locations/spiders/holland_and_barrett.py
+++ b/locations/spiders/holland_and_barrett.py
@@ -19,4 +19,6 @@
download_delay = 1.0
def parse(self, response):
- yield LinkedDataParser.parse(response, "LocalBusiness")
+ item = LinkedDataParser.parse(response, "LocalBusiness")
+ item["website"] = response.urljoin(item["website"])
+ yield item
| {"golden_diff": "diff --git a/locations/spiders/holland_and_barrett.py b/locations/spiders/holland_and_barrett.py\n--- a/locations/spiders/holland_and_barrett.py\n+++ b/locations/spiders/holland_and_barrett.py\n@@ -19,4 +19,6 @@\n download_delay = 1.0\n \n def parse(self, response):\n- yield LinkedDataParser.parse(response, \"LocalBusiness\")\n+ item = LinkedDataParser.parse(response, \"LocalBusiness\")\n+ item[\"website\"] = response.urljoin(item[\"website\"])\n+ yield item\n", "issue": "Domain missing from Holland & Barrett website URLs\nIn the holland_and_barrett spider results, the website values returned are missing the domain, e.g. `\"website\": \"/stores/aylesbury-3180/\"`. This is what's in the code that the scraper is reading. But presumably AllThePlaces should return a fully qualified url, i.e. `https://www.hollandandbarrett.com/stores/aylesbury-3180/` in this case.\r\n\r\nI don't know what the micordata etc standards say about whether relative URLs are allowed, but perhaps the framework code could be modified to automatically complete the URL of the page if a relative URL is harvested.\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.linked_data_parser import LinkedDataParser\n\n\nclass HollandAndBarrettSpider(SitemapSpider):\n name = \"holland_and_barrett\"\n item_attributes = {\n \"brand\": \"Holland & Barrett\",\n \"brand_wikidata\": \"Q5880870\",\n }\n sitemap_urls = [\n \"https://www.hollandandbarrett.com/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.nl/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.be/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.ie/sitemap-stores.xml\",\n ]\n sitemap_rules = [(\"/stores/\", \"parse\"), (\"/winkels/\", \"parse\")]\n download_delay = 1.0\n\n def parse(self, response):\n yield LinkedDataParser.parse(response, \"LocalBusiness\")\n", "path": "locations/spiders/holland_and_barrett.py"}], "after_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.linked_data_parser import LinkedDataParser\n\n\nclass HollandAndBarrettSpider(SitemapSpider):\n name = \"holland_and_barrett\"\n item_attributes = {\n \"brand\": \"Holland & Barrett\",\n \"brand_wikidata\": \"Q5880870\",\n }\n sitemap_urls = [\n \"https://www.hollandandbarrett.com/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.nl/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.be/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.ie/sitemap-stores.xml\",\n ]\n sitemap_rules = [(\"/stores/\", \"parse\"), (\"/winkels/\", \"parse\")]\n download_delay = 1.0\n\n def parse(self, response):\n item = LinkedDataParser.parse(response, \"LocalBusiness\")\n item[\"website\"] = response.urljoin(item[\"website\"])\n yield item\n", "path": "locations/spiders/holland_and_barrett.py"}]} | 643 | 126 |
gh_patches_debug_36801 | rasdani/github-patches | git_diff | pypa__pip-7216 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add SSL CA certificate information to `pip debug`
**What's the problem this feature will solve?**
As described in [#6720 (comment)](https://github.com/pypa/pip/issues/6720#issuecomment-538791684), pip may be using several sources of information for the CA certificate bundle to use for HTTPS requests. This makes it hard to debug user issues.
**Describe the solution you'd like**
In the output of `pip debug` we should include:
* the `cert` setting from the highest-priority pip configuration file (~~and the configuration file path~~) - on second thought the location doesn't matter much
* `os.environ.get('REQUESTS_CA_BUNDLE')`
* `os.environ.get('CURL_CA_BUNDLE')`
* `pip._vendor.certifi.where()`
This will provide insight into the CA certificate bundle in use for a given request, which can then be used in instructions to the user in conjunction with curl/openssl to submit an HTTP request independent of pip and rule out pip-specific issues.
**Alternative Solutions**
Do nothing.
**Additional context**
* #4459
* #4919
* #6335
* #6720
* #6915
--- END ISSUE ---
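Editorial aside, not part of the original issue: three of the four values listed above can be probed directly from a short script, which is roughly what the accepted patch further down makes `pip debug` print. A hedged sketch; the `cert` config value is omitted here because it has to go through pip's configuration machinery:
```python
import os

# pip vendors certifi, so this import is expected to work from a pip checkout;
# plain `import certifi` is the equivalent outside of pip.
from pip._vendor import certifi

print("REQUESTS_CA_BUNDLE:", os.environ.get("REQUESTS_CA_BUNDLE"))
print("CURL_CA_BUNDLE:", os.environ.get("CURL_CA_BUNDLE"))
print("pip._vendor.certifi.where():", certifi.where())
```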
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/commands/debug.py`
Content:
```
1 # The following comment should be removed at some point in the future.
2 # mypy: disallow-untyped-defs=False
3
4 from __future__ import absolute_import
5
6 import locale
7 import logging
8 import sys
9
10 from pip._internal.cli import cmdoptions
11 from pip._internal.cli.base_command import Command
12 from pip._internal.cli.cmdoptions import make_target_python
13 from pip._internal.cli.status_codes import SUCCESS
14 from pip._internal.utils.logging import indent_log
15 from pip._internal.utils.misc import get_pip_version
16 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
17 from pip._internal.wheel import format_tag
18
19 if MYPY_CHECK_RUNNING:
20 from typing import Any, List
21 from optparse import Values
22
23 logger = logging.getLogger(__name__)
24
25
26 def show_value(name, value):
27 # type: (str, str) -> None
28 logger.info('{}: {}'.format(name, value))
29
30
31 def show_sys_implementation():
32 # type: () -> None
33 logger.info('sys.implementation:')
34 if hasattr(sys, 'implementation'):
35 implementation = sys.implementation # type: ignore
36 implementation_name = implementation.name
37 else:
38 implementation_name = ''
39
40 with indent_log():
41 show_value('name', implementation_name)
42
43
44 def show_tags(options):
45 # type: (Values) -> None
46 tag_limit = 10
47
48 target_python = make_target_python(options)
49 tags = target_python.get_tags()
50
51 # Display the target options that were explicitly provided.
52 formatted_target = target_python.format_given()
53 suffix = ''
54 if formatted_target:
55 suffix = ' (target: {})'.format(formatted_target)
56
57 msg = 'Compatible tags: {}{}'.format(len(tags), suffix)
58 logger.info(msg)
59
60 if options.verbose < 1 and len(tags) > tag_limit:
61 tags_limited = True
62 tags = tags[:tag_limit]
63 else:
64 tags_limited = False
65
66 with indent_log():
67 for tag in tags:
68 logger.info(format_tag(tag))
69
70 if tags_limited:
71 msg = (
72 '...\n'
73 '[First {tag_limit} tags shown. Pass --verbose to show all.]'
74 ).format(tag_limit=tag_limit)
75 logger.info(msg)
76
77
78 class DebugCommand(Command):
79 """
80 Display debug information.
81 """
82
83 usage = """
84 %prog <options>"""
85 ignore_require_venv = True
86
87 def __init__(self, *args, **kw):
88 super(DebugCommand, self).__init__(*args, **kw)
89
90 cmd_opts = self.cmd_opts
91 cmdoptions.add_target_python_options(cmd_opts)
92 self.parser.insert_option_group(0, cmd_opts)
93
94 def run(self, options, args):
95 # type: (Values, List[Any]) -> int
96 logger.warning(
97 "This command is only meant for debugging. "
98 "Do not use this with automation for parsing and getting these "
99 "details, since the output and options of this command may "
100 "change without notice."
101 )
102 show_value('pip version', get_pip_version())
103 show_value('sys.version', sys.version)
104 show_value('sys.executable', sys.executable)
105 show_value('sys.getdefaultencoding', sys.getdefaultencoding())
106 show_value('sys.getfilesystemencoding', sys.getfilesystemencoding())
107 show_value(
108 'locale.getpreferredencoding', locale.getpreferredencoding(),
109 )
110 show_value('sys.platform', sys.platform)
111 show_sys_implementation()
112
113 show_tags(options)
114
115 return SUCCESS
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/commands/debug.py b/src/pip/_internal/commands/debug.py
--- a/src/pip/_internal/commands/debug.py
+++ b/src/pip/_internal/commands/debug.py
@@ -5,8 +5,11 @@
import locale
import logging
+import os
import sys
+from pip._vendor.certifi import where
+
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.cli.cmdoptions import make_target_python
@@ -17,14 +20,14 @@
from pip._internal.wheel import format_tag
if MYPY_CHECK_RUNNING:
- from typing import Any, List
+ from typing import Any, List, Optional
from optparse import Values
logger = logging.getLogger(__name__)
def show_value(name, value):
- # type: (str, str) -> None
+ # type: (str, Optional[str]) -> None
logger.info('{}: {}'.format(name, value))
@@ -75,6 +78,25 @@
logger.info(msg)
+def ca_bundle_info(config):
+ levels = set()
+ for key, value in config.items():
+ levels.add(key.split('.')[0])
+
+ if not levels:
+ return "Not specified"
+
+ levels_that_override_global = ['install', 'wheel', 'download']
+ global_overriding_level = [
+ level for level in levels if level in levels_that_override_global
+ ]
+ if not global_overriding_level:
+ return 'global'
+
+ levels.remove('global')
+ return ", ".join(levels)
+
+
class DebugCommand(Command):
"""
Display debug information.
@@ -90,6 +112,7 @@
cmd_opts = self.cmd_opts
cmdoptions.add_target_python_options(cmd_opts)
self.parser.insert_option_group(0, cmd_opts)
+ self.parser.config.load()
def run(self, options, args):
# type: (Values, List[Any]) -> int
@@ -110,6 +133,11 @@
show_value('sys.platform', sys.platform)
show_sys_implementation()
+ show_value("'cert' config value", ca_bundle_info(self.parser.config))
+ show_value("REQUESTS_CA_BUNDLE", os.environ.get('REQUESTS_CA_BUNDLE'))
+ show_value("CURL_CA_BUNDLE", os.environ.get('CURL_CA_BUNDLE'))
+ show_value("pip._vendor.certifi.where()", where())
+
show_tags(options)
return SUCCESS
| {"golden_diff": "diff --git a/src/pip/_internal/commands/debug.py b/src/pip/_internal/commands/debug.py\n--- a/src/pip/_internal/commands/debug.py\n+++ b/src/pip/_internal/commands/debug.py\n@@ -5,8 +5,11 @@\n \n import locale\n import logging\n+import os\n import sys\n \n+from pip._vendor.certifi import where\n+\n from pip._internal.cli import cmdoptions\n from pip._internal.cli.base_command import Command\n from pip._internal.cli.cmdoptions import make_target_python\n@@ -17,14 +20,14 @@\n from pip._internal.wheel import format_tag\n \n if MYPY_CHECK_RUNNING:\n- from typing import Any, List\n+ from typing import Any, List, Optional\n from optparse import Values\n \n logger = logging.getLogger(__name__)\n \n \n def show_value(name, value):\n- # type: (str, str) -> None\n+ # type: (str, Optional[str]) -> None\n logger.info('{}: {}'.format(name, value))\n \n \n@@ -75,6 +78,25 @@\n logger.info(msg)\n \n \n+def ca_bundle_info(config):\n+ levels = set()\n+ for key, value in config.items():\n+ levels.add(key.split('.')[0])\n+\n+ if not levels:\n+ return \"Not specified\"\n+\n+ levels_that_override_global = ['install', 'wheel', 'download']\n+ global_overriding_level = [\n+ level for level in levels if level in levels_that_override_global\n+ ]\n+ if not global_overriding_level:\n+ return 'global'\n+\n+ levels.remove('global')\n+ return \", \".join(levels)\n+\n+\n class DebugCommand(Command):\n \"\"\"\n Display debug information.\n@@ -90,6 +112,7 @@\n cmd_opts = self.cmd_opts\n cmdoptions.add_target_python_options(cmd_opts)\n self.parser.insert_option_group(0, cmd_opts)\n+ self.parser.config.load()\n \n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n@@ -110,6 +133,11 @@\n show_value('sys.platform', sys.platform)\n show_sys_implementation()\n \n+ show_value(\"'cert' config value\", ca_bundle_info(self.parser.config))\n+ show_value(\"REQUESTS_CA_BUNDLE\", os.environ.get('REQUESTS_CA_BUNDLE'))\n+ show_value(\"CURL_CA_BUNDLE\", os.environ.get('CURL_CA_BUNDLE'))\n+ show_value(\"pip._vendor.certifi.where()\", where())\n+\n show_tags(options)\n \n return SUCCESS\n", "issue": "Add SSL CA certificate information to `pip debug`\n**What's the problem this feature will solve?**\r\n\r\nAs described in [#6720 (comment)](https://github.com/pypa/pip/issues/6720#issuecomment-538791684), pip may be using several sources of information for the CA certificate bundle to use for HTTPS requests. 
This makes it hard to debug user issues.\r\n\r\n**Describe the solution you'd like**\r\n\r\nIn the output of `pip debug` we should include:\r\n\r\n* the `cert` setting from the highest-priority pip configuration file (~~and the configuration file path~~) - on second thought the location doesn't matter much\r\n* `os.environ.get('REQUESTS_CA_BUNDLE')`\r\n* `os.environ.get('CURL_CA_BUNDLE')`\r\n* `pip._vendor.certifi.where()`\r\n\r\nThis will provide insight into the CA certificate bundle in use for a given request, which can then be used in instructions to the user in conjunction with curl/openssl to submit an HTTP request independent of pip and rule out pip-specific issues.\r\n\r\n**Alternative Solutions**\r\n\r\nDo nothing.\r\n\r\n**Additional context**\r\n\r\n* #4459\r\n* #4919\r\n* #6335\r\n* #6720\r\n* #6915\n", "before_files": [{"content": "# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nfrom __future__ import absolute_import\n\nimport locale\nimport logging\nimport sys\n\nfrom pip._internal.cli import cmdoptions\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.cmdoptions import make_target_python\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.misc import get_pip_version\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\nfrom pip._internal.wheel import format_tag\n\nif MYPY_CHECK_RUNNING:\n from typing import Any, List\n from optparse import Values\n\nlogger = logging.getLogger(__name__)\n\n\ndef show_value(name, value):\n # type: (str, str) -> None\n logger.info('{}: {}'.format(name, value))\n\n\ndef show_sys_implementation():\n # type: () -> None\n logger.info('sys.implementation:')\n if hasattr(sys, 'implementation'):\n implementation = sys.implementation # type: ignore\n implementation_name = implementation.name\n else:\n implementation_name = ''\n\n with indent_log():\n show_value('name', implementation_name)\n\n\ndef show_tags(options):\n # type: (Values) -> None\n tag_limit = 10\n\n target_python = make_target_python(options)\n tags = target_python.get_tags()\n\n # Display the target options that were explicitly provided.\n formatted_target = target_python.format_given()\n suffix = ''\n if formatted_target:\n suffix = ' (target: {})'.format(formatted_target)\n\n msg = 'Compatible tags: {}{}'.format(len(tags), suffix)\n logger.info(msg)\n\n if options.verbose < 1 and len(tags) > tag_limit:\n tags_limited = True\n tags = tags[:tag_limit]\n else:\n tags_limited = False\n\n with indent_log():\n for tag in tags:\n logger.info(format_tag(tag))\n\n if tags_limited:\n msg = (\n '...\\n'\n '[First {tag_limit} tags shown. Pass --verbose to show all.]'\n ).format(tag_limit=tag_limit)\n logger.info(msg)\n\n\nclass DebugCommand(Command):\n \"\"\"\n Display debug information.\n \"\"\"\n\n usage = \"\"\"\n %prog <options>\"\"\"\n ignore_require_venv = True\n\n def __init__(self, *args, **kw):\n super(DebugCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n cmdoptions.add_target_python_options(cmd_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n logger.warning(\n \"This command is only meant for debugging. 
\"\n \"Do not use this with automation for parsing and getting these \"\n \"details, since the output and options of this command may \"\n \"change without notice.\"\n )\n show_value('pip version', get_pip_version())\n show_value('sys.version', sys.version)\n show_value('sys.executable', sys.executable)\n show_value('sys.getdefaultencoding', sys.getdefaultencoding())\n show_value('sys.getfilesystemencoding', sys.getfilesystemencoding())\n show_value(\n 'locale.getpreferredencoding', locale.getpreferredencoding(),\n )\n show_value('sys.platform', sys.platform)\n show_sys_implementation()\n\n show_tags(options)\n\n return SUCCESS\n", "path": "src/pip/_internal/commands/debug.py"}], "after_files": [{"content": "# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nfrom __future__ import absolute_import\n\nimport locale\nimport logging\nimport os\nimport sys\n\nfrom pip._vendor.certifi import where\n\nfrom pip._internal.cli import cmdoptions\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.cmdoptions import make_target_python\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.misc import get_pip_version\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\nfrom pip._internal.wheel import format_tag\n\nif MYPY_CHECK_RUNNING:\n from typing import Any, List, Optional\n from optparse import Values\n\nlogger = logging.getLogger(__name__)\n\n\ndef show_value(name, value):\n # type: (str, Optional[str]) -> None\n logger.info('{}: {}'.format(name, value))\n\n\ndef show_sys_implementation():\n # type: () -> None\n logger.info('sys.implementation:')\n if hasattr(sys, 'implementation'):\n implementation = sys.implementation # type: ignore\n implementation_name = implementation.name\n else:\n implementation_name = ''\n\n with indent_log():\n show_value('name', implementation_name)\n\n\ndef show_tags(options):\n # type: (Values) -> None\n tag_limit = 10\n\n target_python = make_target_python(options)\n tags = target_python.get_tags()\n\n # Display the target options that were explicitly provided.\n formatted_target = target_python.format_given()\n suffix = ''\n if formatted_target:\n suffix = ' (target: {})'.format(formatted_target)\n\n msg = 'Compatible tags: {}{}'.format(len(tags), suffix)\n logger.info(msg)\n\n if options.verbose < 1 and len(tags) > tag_limit:\n tags_limited = True\n tags = tags[:tag_limit]\n else:\n tags_limited = False\n\n with indent_log():\n for tag in tags:\n logger.info(format_tag(tag))\n\n if tags_limited:\n msg = (\n '...\\n'\n '[First {tag_limit} tags shown. 
Pass --verbose to show all.]'\n ).format(tag_limit=tag_limit)\n logger.info(msg)\n\n\ndef ca_bundle_info(config):\n levels = set()\n for key, value in config.items():\n levels.add(key.split('.')[0])\n\n if not levels:\n return \"Not specified\"\n\n levels_that_override_global = ['install', 'wheel', 'download']\n global_overriding_level = [\n level for level in levels if level in levels_that_override_global\n ]\n if not global_overriding_level:\n return 'global'\n\n levels.remove('global')\n return \", \".join(levels)\n\n\nclass DebugCommand(Command):\n \"\"\"\n Display debug information.\n \"\"\"\n\n usage = \"\"\"\n %prog <options>\"\"\"\n ignore_require_venv = True\n\n def __init__(self, *args, **kw):\n super(DebugCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n cmdoptions.add_target_python_options(cmd_opts)\n self.parser.insert_option_group(0, cmd_opts)\n self.parser.config.load()\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n logger.warning(\n \"This command is only meant for debugging. \"\n \"Do not use this with automation for parsing and getting these \"\n \"details, since the output and options of this command may \"\n \"change without notice.\"\n )\n show_value('pip version', get_pip_version())\n show_value('sys.version', sys.version)\n show_value('sys.executable', sys.executable)\n show_value('sys.getdefaultencoding', sys.getdefaultencoding())\n show_value('sys.getfilesystemencoding', sys.getfilesystemencoding())\n show_value(\n 'locale.getpreferredencoding', locale.getpreferredencoding(),\n )\n show_value('sys.platform', sys.platform)\n show_sys_implementation()\n\n show_value(\"'cert' config value\", ca_bundle_info(self.parser.config))\n show_value(\"REQUESTS_CA_BUNDLE\", os.environ.get('REQUESTS_CA_BUNDLE'))\n show_value(\"CURL_CA_BUNDLE\", os.environ.get('CURL_CA_BUNDLE'))\n show_value(\"pip._vendor.certifi.where()\", where())\n\n show_tags(options)\n\n return SUCCESS\n", "path": "src/pip/_internal/commands/debug.py"}]} | 1,543 | 569 |
gh_patches_debug_19632 | rasdani/github-patches | git_diff | networkx__networkx-3628 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
jit json import/export
I would consider the functions `jit_data` and `jit_graph` to be their inverses, so that
```
import networkx as nx
nx.jit_graph(nx.jit_data(nx.Graph()))
```
works.
Instead, it produces a TypeError (nx 2.2), because jit_data is a function `nx graph -> json string`, while jit_graph is a function `json object -> nx graph`, so that the correct program would be
```
import networkx as nx
import json
nx.jit_graph(json.loads(nx.jit_data(nx.Graph())))
```
This is documented, but in my view it is unexpected and incoherent behavior. I'm pretty new to networkx and am not familiar with your design philosophy, but I see the following options:
* to add a clarifying note in the documentation OR
* return the json object in `jit_data` OR
* make use of the json.loads function in `jit_graph`.
What are your opinions on this?
I am willing to submit a PR (but it is probably easier for you to just make that one-line commit, so that's also fine :))
--- END ISSUE ---
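Editorial aside, not part of the issue above: the third option boils down to letting `jit_graph` detect a string and decode it before iterating. A self-contained sketch of that behaviour, using a plain `isinstance` check (the accepted patch further down uses networkx's own string helper instead) and assuming a networkx 2.x release that still ships `jit_data`/`jit_graph`:
```python
import json

import networkx as nx


def jit_graph_tolerant(data, create_using=None):
    """Sketch: accept either a JIT JSON string or an already-decoded object."""
    G = nx.Graph() if create_using is None else create_using
    G.clear()
    if isinstance(data, str):  # the extra tolerance the issue asks for
        data = json.loads(data)
    for node in data:
        G.add_node(node["id"], **node["data"])
        for adj in node.get("adjacencies") or []:
            G.add_edge(node["id"], adj["nodeTo"], **adj["data"])
    return G


# The round trip from the issue now works without an explicit json.loads:
G = jit_graph_tolerant(nx.jit_data(nx.Graph()))
print(G.number_of_nodes())  # 0
```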
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/readwrite/json_graph/jit.py`
Content:
```
1 # Copyright (C) 2011-2019 by
2 # Aric Hagberg <[email protected]>
3 # Dan Schult <[email protected]>
4 # Pieter Swart <[email protected]>
5 # All rights reserved.
6 # BSD license.
7
8 """
9 Read and write NetworkX graphs as JavaScript InfoVis Toolkit (JIT) format JSON.
10
11 See the `JIT documentation`_ for more examples.
12
13 Format
14 ------
15 var json = [
16 {
17 "id": "aUniqueIdentifier",
18 "name": "usually a nodes name",
19 "data": {
20 "some key": "some value",
21 "some other key": "some other value"
22 },
23 "adjacencies": [
24 {
25 nodeTo:"aNodeId",
26 data: {} //put whatever you want here
27 },
28 'other adjacencies go here...'
29 },
30
31 'other nodes go here...'
32 ];
33 .. _JIT documentation: http://thejit.org
34 """
35
36 import json
37 import networkx as nx
38 from networkx.utils.decorators import not_implemented_for
39
40 __all__ = ['jit_graph', 'jit_data']
41
42
43 def jit_graph(data, create_using=None):
44 """Read a graph from JIT JSON.
45
46 Parameters
47 ----------
48 data : JSON Graph Object
49
50 create_using : Networkx Graph, optional (default: Graph())
51 Return graph of this type. The provided instance will be cleared.
52
53 Returns
54 -------
55 G : NetworkX Graph built from create_using if provided.
56 """
57 if create_using is None:
58 G = nx.Graph()
59 else:
60 G = create_using
61 G.clear()
62
63 for node in data:
64 G.add_node(node['id'], **node['data'])
65 if node.get('adjacencies') is not None:
66 for adj in node['adjacencies']:
67 G.add_edge(node['id'], adj['nodeTo'], **adj['data'])
68 return G
69
70
71 @not_implemented_for('multigraph')
72 def jit_data(G, indent=None):
73 """Returns data in JIT JSON format.
74
75 Parameters
76 ----------
77 G : NetworkX Graph
78
79 indent: optional, default=None
80 If indent is a non-negative integer, then JSON array elements and object
81 members will be pretty-printed with that indent level. An indent level
82 of 0, or negative, will only insert newlines. None (the default) selects
83 the most compact representation.
84
85 Returns
86 -------
87 data: JIT JSON string
88 """
89 json_graph = []
90 for node in G.nodes():
91 json_node = {
92 "id": node,
93 "name": node
94 }
95 # node data
96 json_node["data"] = G.nodes[node]
97 # adjacencies
98 if G[node]:
99 json_node["adjacencies"] = []
100 for neighbour in G[node]:
101 adjacency = {
102 "nodeTo": neighbour,
103 }
104 # adjacency data
105 adjacency["data"] = G.edges[node, neighbour]
106 json_node["adjacencies"].append(adjacency)
107 json_graph.append(json_node)
108 return json.dumps(json_graph, indent=indent)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/readwrite/json_graph/jit.py b/networkx/readwrite/json_graph/jit.py
--- a/networkx/readwrite/json_graph/jit.py
+++ b/networkx/readwrite/json_graph/jit.py
@@ -60,6 +60,9 @@
G = create_using
G.clear()
+ if nx.utils.is_string_like(data):
+ data = json.loads(data)
+
for node in data:
G.add_node(node['id'], **node['data'])
if node.get('adjacencies') is not None:
@@ -77,10 +80,10 @@
G : NetworkX Graph
indent: optional, default=None
- If indent is a non-negative integer, then JSON array elements and object
- members will be pretty-printed with that indent level. An indent level
- of 0, or negative, will only insert newlines. None (the default) selects
- the most compact representation.
+ If indent is a non-negative integer, then JSON array elements and
+ object members will be pretty-printed with that indent level.
+ An indent level of 0, or negative, will only insert newlines.
+ None (the default) selects the most compact representation.
Returns
-------
| {"golden_diff": "diff --git a/networkx/readwrite/json_graph/jit.py b/networkx/readwrite/json_graph/jit.py\n--- a/networkx/readwrite/json_graph/jit.py\n+++ b/networkx/readwrite/json_graph/jit.py\n@@ -60,6 +60,9 @@\n G = create_using\n G.clear()\n \n+ if nx.utils.is_string_like(data):\n+ data = json.loads(data)\n+\n for node in data:\n G.add_node(node['id'], **node['data'])\n if node.get('adjacencies') is not None:\n@@ -77,10 +80,10 @@\n G : NetworkX Graph\n \n indent: optional, default=None\n- If indent is a non-negative integer, then JSON array elements and object\n- members will be pretty-printed with that indent level. An indent level\n- of 0, or negative, will only insert newlines. None (the default) selects\n- the most compact representation.\n+ If indent is a non-negative integer, then JSON array elements and\n+ object members will be pretty-printed with that indent level.\n+ An indent level of 0, or negative, will only insert newlines.\n+ None (the default) selects the most compact representation.\n \n Returns\n -------\n", "issue": "jit json import/export\nI would consider the functions `jit_data` and `jit_graph` to be their inverses, so that\r\n```\r\nimport networkx as nx\r\nnx.jit_graph(nx.jit_data(nx.Graph()))\r\n```\r\nworks.\r\n\r\nInstead, it produces a TypeError (nx 2.2), because jit_data is a function `nx graph -> json string`, while jit_graph is a function `json object -> nx graph`, so that the correct program would be\r\n```\r\nimport networkx as nx\r\nimport json\r\nnx.jit_graph(json.loads(nx.jit_data(nx.Graph())))\r\n```\r\n\r\nThis is documented, but in my view unexpected and incoherent behavior. I'm pretty new to networkx and are not familiar with your design philosophy, but see the options\r\n* to add a clarifying note in the documentation OR\r\n* return the json object in `jit_data` OR\r\n* make use of the json.loads function in `jit_graph`.\r\n\r\nWhat are your opinions on this?\r\nI am willing to submit a PR (but probably it is just easier for you to make that oneline-commit, so that's also fine :))\n", "before_files": [{"content": "# Copyright (C) 2011-2019 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\n\"\"\"\nRead and write NetworkX graphs as JavaScript InfoVis Toolkit (JIT) format JSON.\n\nSee the `JIT documentation`_ for more examples.\n\nFormat\n------\nvar json = [\n {\n \"id\": \"aUniqueIdentifier\",\n \"name\": \"usually a nodes name\",\n \"data\": {\n \"some key\": \"some value\",\n \"some other key\": \"some other value\"\n },\n \"adjacencies\": [\n {\n nodeTo:\"aNodeId\",\n data: {} //put whatever you want here\n },\n 'other adjacencies go here...'\n },\n\n 'other nodes go here...'\n];\n.. _JIT documentation: http://thejit.org\n\"\"\"\n\nimport json\nimport networkx as nx\nfrom networkx.utils.decorators import not_implemented_for\n\n__all__ = ['jit_graph', 'jit_data']\n\n\ndef jit_graph(data, create_using=None):\n \"\"\"Read a graph from JIT JSON.\n\n Parameters\n ----------\n data : JSON Graph Object\n\n create_using : Networkx Graph, optional (default: Graph())\n Return graph of this type. 
The provided instance will be cleared.\n\n Returns\n -------\n G : NetworkX Graph built from create_using if provided.\n \"\"\"\n if create_using is None:\n G = nx.Graph()\n else:\n G = create_using\n G.clear()\n\n for node in data:\n G.add_node(node['id'], **node['data'])\n if node.get('adjacencies') is not None:\n for adj in node['adjacencies']:\n G.add_edge(node['id'], adj['nodeTo'], **adj['data'])\n return G\n\n\n@not_implemented_for('multigraph')\ndef jit_data(G, indent=None):\n \"\"\"Returns data in JIT JSON format.\n\n Parameters\n ----------\n G : NetworkX Graph\n\n indent: optional, default=None\n If indent is a non-negative integer, then JSON array elements and object\n members will be pretty-printed with that indent level. An indent level\n of 0, or negative, will only insert newlines. None (the default) selects\n the most compact representation.\n\n Returns\n -------\n data: JIT JSON string\n \"\"\"\n json_graph = []\n for node in G.nodes():\n json_node = {\n \"id\": node,\n \"name\": node\n }\n # node data\n json_node[\"data\"] = G.nodes[node]\n # adjacencies\n if G[node]:\n json_node[\"adjacencies\"] = []\n for neighbour in G[node]:\n adjacency = {\n \"nodeTo\": neighbour,\n }\n # adjacency data\n adjacency[\"data\"] = G.edges[node, neighbour]\n json_node[\"adjacencies\"].append(adjacency)\n json_graph.append(json_node)\n return json.dumps(json_graph, indent=indent)\n", "path": "networkx/readwrite/json_graph/jit.py"}], "after_files": [{"content": "# Copyright (C) 2011-2019 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\n\"\"\"\nRead and write NetworkX graphs as JavaScript InfoVis Toolkit (JIT) format JSON.\n\nSee the `JIT documentation`_ for more examples.\n\nFormat\n------\nvar json = [\n {\n \"id\": \"aUniqueIdentifier\",\n \"name\": \"usually a nodes name\",\n \"data\": {\n \"some key\": \"some value\",\n \"some other key\": \"some other value\"\n },\n \"adjacencies\": [\n {\n nodeTo:\"aNodeId\",\n data: {} //put whatever you want here\n },\n 'other adjacencies go here...'\n },\n\n 'other nodes go here...'\n];\n.. _JIT documentation: http://thejit.org\n\"\"\"\n\nimport json\nimport networkx as nx\nfrom networkx.utils.decorators import not_implemented_for\n\n__all__ = ['jit_graph', 'jit_data']\n\n\ndef jit_graph(data, create_using=None):\n \"\"\"Read a graph from JIT JSON.\n\n Parameters\n ----------\n data : JSON Graph Object\n\n create_using : Networkx Graph, optional (default: Graph())\n Return graph of this type. 
The provided instance will be cleared.\n\n Returns\n -------\n G : NetworkX Graph built from create_using if provided.\n \"\"\"\n if create_using is None:\n G = nx.Graph()\n else:\n G = create_using\n G.clear()\n\n if nx.utils.is_string_like(data):\n data = json.loads(data)\n\n for node in data:\n G.add_node(node['id'], **node['data'])\n if node.get('adjacencies') is not None:\n for adj in node['adjacencies']:\n G.add_edge(node['id'], adj['nodeTo'], **adj['data'])\n return G\n\n\n@not_implemented_for('multigraph')\ndef jit_data(G, indent=None):\n \"\"\"Returns data in JIT JSON format.\n\n Parameters\n ----------\n G : NetworkX Graph\n\n indent: optional, default=None\n If indent is a non-negative integer, then JSON array elements and\n object members will be pretty-printed with that indent level.\n An indent level of 0, or negative, will only insert newlines.\n None (the default) selects the most compact representation.\n\n Returns\n -------\n data: JIT JSON string\n \"\"\"\n json_graph = []\n for node in G.nodes():\n json_node = {\n \"id\": node,\n \"name\": node\n }\n # node data\n json_node[\"data\"] = G.nodes[node]\n # adjacencies\n if G[node]:\n json_node[\"adjacencies\"] = []\n for neighbour in G[node]:\n adjacency = {\n \"nodeTo\": neighbour,\n }\n # adjacency data\n adjacency[\"data\"] = G.edges[node, neighbour]\n json_node[\"adjacencies\"].append(adjacency)\n json_graph.append(json_node)\n return json.dumps(json_graph, indent=indent)\n", "path": "networkx/readwrite/json_graph/jit.py"}]} | 1,409 | 281 |
gh_patches_debug_7697 | rasdani/github-patches | git_diff | azavea__raster-vision-800 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Config builders cannot have type annotations in __init__
Trying to build [a config class](https://github.com/raster-foundry/raster-vision-plugin/blob/996044a503d09d311105d07da98b31284b6a6e91/src/rf_raster_vision_plugin/raster_source/config.py) with type annotations, you get:
```
In [7]: RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-2f92db6db3a6> in <module>()
----> 1 RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()
/opt/src/rastervision/core/config.py in build(self)
99 """
100 self.validate()
--> 101 arguments = set(inspect.getargspec(self.config_class).args)
102 keys = set(self.config.keys())
103 config = {k: self.config[k] for k in (arguments & keys)}
/usr/lib/python3.5/inspect.py in getargspec(func)
1043 getfullargspec(func)
1044 if kwonlyargs or ann:
-> 1045 raise ValueError("Function has keyword-only arguments or annotations"
1046 ", use getfullargspec() API which can support them")
1047 return ArgSpec(args, varargs, varkw, defaults)
ValueError: Function has keyword-only arguments or annotations, use getfullargspec() API which can support them
```
Reproduction
-----
- `docker/run` from the linked repo
- `ipython`
```python
>>> from rf_raster_vision_plugin.raster_source.config import RfRasterSourceConfig, RfRasterSourceConfigBuilder
>>> RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()
```
Expected Behavior
-----
Config builder shouldn't choke on type annotations
--- END ISSUE ---
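Editorial aside, not part of the issue above: the traceback points at `inspect.getargspec`, which refuses any signature carrying annotations, while `inspect.getfullargspec` accepts them. A minimal demonstration that is independent of Raster Vision (the class and field names are made up):
```python
import inspect


class AnnotatedConfig:
    def __init__(self, name: str, count: int = 0):
        self.name = name
        self.count = count


# getfullargspec copes with the annotated __init__:
print(inspect.getfullargspec(AnnotatedConfig).args)  # ['self', 'name', 'count']

# On Python 3.5-3.10 getargspec still exists but raises ValueError here;
# it was removed entirely in Python 3.11.
try:
    inspect.getargspec(AnnotatedConfig)
except (ValueError, AttributeError) as err:
    print("getargspec failed:", err)
```
Swapping `getargspec` for `getfullargspec` in `ConfigBuilder.build` is exactly what the accepted patch further down does.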
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/core/config.py`
Content:
```
1 from abc import (ABC, abstractmethod)
2 import os
3 import inspect
4
5 from rastervision.utils.files import download_or_copy
6
7
8 class ConfigError(Exception):
9 pass
10
11
12 class Config(ABC):
13 @abstractmethod
14 def to_builder(self):
15 """Return a builder based on this config.
16 """
17 pass # pragma: no cover
18
19 @abstractmethod
20 def to_proto(self):
21 """Returns the protobuf configuration for this config.
22 """
23 pass # pragma: no cover
24
25 def update_for_command(self,
26 command_type,
27 experiment_config,
28 context=None,
29 io_def=None):
30 """Updates this configuration for the given command
31
32 Note: While configuration is immutable for client facing operations,
33 this is an internal operation and mutates the configuration.
34
35 Args:
36 command_type: The command type that is currently being
37 preprocessed. experiment_config: The experiment configuration
38 that this configuration is a part of.
39 context: Optional list of parent configurations, to allow for child
40 configurations contained in collections to understand their
41 context in the experiment configuration.
42
43 Returns:
44 Nothing. Call should mutate the configuration object itself.
45 """
46 pass # pragma: no cover
47
48 @abstractmethod
49 def report_io(self, command_type, io_def):
50 """Updates the given CommandIODefinition.
51
52 So that it includes the inputs, outputs, and missing files for this
53 configuration at this command.
54
55 Args:
56 command_type: The command type that is currently being preprocessed.
57 io_def: The CommandIODefinition that this call should modify.
58
59 Returns: Nothing. This call should make the appropriate calls to the
60 given io_def to mutate its state.
61 """
62 pass
63
64 @staticmethod
65 @abstractmethod
66 def builder():
67 """Returns a new builder that takes this configuration
68 as its starting point.
69 """
70 pass # pragma: no cover
71
72 @staticmethod
73 @abstractmethod
74 def from_proto(msg):
75 """Creates a Config from the specificed protobuf message
76 TODO: Allow loading from file uri or dict
77 """
78 pass # pragma: no cover
79
80
81 class ConfigBuilder(ABC):
82 def __init__(self, config_class, config=None):
83 """Construct a builder.
84
85 Args:
86 config_class: The Config class that this builder builds.
87 config: A dictionary of **kwargs that will eventually be passed
88 into the __init__ method of config_class to build the configuration.
89 This config is modified with the fluent builder methods.
90 """
91 if config is None: # pragma: no cover
92 config = {}
93
94 self.config_class = config_class
95 self.config = config
96
97 def build(self):
98 """Returns the configuration that is built by this builder.
99 """
100 self.validate()
101 arguments = set(inspect.getargspec(self.config_class).args)
102 keys = set(self.config.keys())
103 config = {k: self.config[k] for k in (arguments & keys)}
104 return self.config_class(**config)
105
106 def validate(self):
107 """Validate this config, if there is validation on the builder that
108 is not captured by the required arguments of the config.
109 """
110 pass # pragma: no cover
111
112 @abstractmethod
113 def from_proto(self, msg):
114 """Return a builder that takes the configuration from the proto message
115 as its starting point.
116 """
117 pass # pragma: no cover
118
119
120 class BundledConfigMixin(ABC):
121 """Mixin for configurations that participate in the bundling of a
122 prediction package"""
123
124 @abstractmethod
125 def save_bundle_files(self, bundle_dir):
126 """Place files into a bundle directory for bundling into
127 a prediction package.
128
129 Returns: A tuple of (config, uris) of the modified configuration
130 with the basenames of URIs in place of the original URIs,
131 and a list of URIs that are to be bundled.
132 """
133 pass # pragma: no cover
134
135 def bundle_file(self, uri, bundle_dir):
136 local_path = download_or_copy(uri, bundle_dir)
137 base_name = os.path.basename(local_path)
138 return (local_path, base_name)
139
140 @abstractmethod
141 def load_bundle_files(self, bundle_dir):
142 """Load files from a prediction package bundle directory."""
143 pass # pragma: no cover
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision/core/config.py b/rastervision/core/config.py
--- a/rastervision/core/config.py
+++ b/rastervision/core/config.py
@@ -98,7 +98,7 @@
"""Returns the configuration that is built by this builder.
"""
self.validate()
- arguments = set(inspect.getargspec(self.config_class).args)
+ arguments = set(inspect.getfullargspec(self.config_class).args)
keys = set(self.config.keys())
config = {k: self.config[k] for k in (arguments & keys)}
return self.config_class(**config)
| {"golden_diff": "diff --git a/rastervision/core/config.py b/rastervision/core/config.py\n--- a/rastervision/core/config.py\n+++ b/rastervision/core/config.py\n@@ -98,7 +98,7 @@\n \"\"\"Returns the configuration that is built by this builder.\n \"\"\"\n self.validate()\n- arguments = set(inspect.getargspec(self.config_class).args)\n+ arguments = set(inspect.getfullargspec(self.config_class).args)\n keys = set(self.config.keys())\n config = {k: self.config[k] for k in (arguments & keys)}\n return self.config_class(**config)\n", "issue": "Config builders cannot have type annotations in __init__\nTrying to build [a config class](https://github.com/raster-foundry/raster-vision-plugin/blob/996044a503d09d311105d07da98b31284b6a6e91/src/rf_raster_vision_plugin/raster_source/config.py) with type annotations, you get:\r\n\r\n```\r\nIn [7]: RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-7-2f92db6db3a6> in <module>()\r\n----> 1 RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()\r\n\r\n/opt/src/rastervision/core/config.py in build(self)\r\n 99 \"\"\"\r\n 100 self.validate()\r\n--> 101 arguments = set(inspect.getargspec(self.config_class).args)\r\n 102 keys = set(self.config.keys())\r\n 103 config = {k: self.config[k] for k in (arguments & keys)}\r\n\r\n/usr/lib/python3.5/inspect.py in getargspec(func)\r\n 1043 getfullargspec(func)\r\n 1044 if kwonlyargs or ann:\r\n-> 1045 raise ValueError(\"Function has keyword-only arguments or annotations\"\r\n 1046 \", use getfullargspec() API which can support them\")\r\n 1047 return ArgSpec(args, varargs, varkw, defaults)\r\n\r\nValueError: Function has keyword-only arguments or annotations, use getfullargspec() API which can support them\r\n```\r\n\r\nReproduction\r\n-----\r\n\r\n- `docker/run` from the linked repo\r\n- `ipython`\r\n\r\n```python\r\n>>> from rf_raster_vision_plugin.raster_source.config import RfRasterSourceConfig, RfRasterSourceConfigBuilder\r\n>>> RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()\r\n```\r\n\r\nExpected Behavior\r\n-----\r\n\r\nConfig builder shouldn't choke on type annotations\n", "before_files": [{"content": "from abc import (ABC, abstractmethod)\nimport os\nimport inspect\n\nfrom rastervision.utils.files import download_or_copy\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass Config(ABC):\n @abstractmethod\n def to_builder(self):\n \"\"\"Return a builder based on this config.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def to_proto(self):\n \"\"\"Returns the protobuf configuration for this config.\n \"\"\"\n pass # pragma: no cover\n\n def update_for_command(self,\n command_type,\n experiment_config,\n context=None,\n io_def=None):\n \"\"\"Updates this configuration for the given command\n\n Note: While configuration is immutable for client facing operations,\n this is an internal operation and mutates the configuration.\n\n Args:\n command_type: The command type that is currently being\n preprocessed. experiment_config: The experiment configuration\n that this configuration is a part of.\n context: Optional list of parent configurations, to allow for child\n configurations contained in collections to understand their\n context in the experiment configuration.\n\n Returns:\n Nothing. 
Call should mutate the configuration object itself.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def report_io(self, command_type, io_def):\n \"\"\"Updates the given CommandIODefinition.\n\n So that it includes the inputs, outputs, and missing files for this\n configuration at this command.\n\n Args:\n command_type: The command type that is currently being preprocessed.\n io_def: The CommandIODefinition that this call should modify.\n\n Returns: Nothing. This call should make the appropriate calls to the\n given io_def to mutate its state.\n \"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def builder():\n \"\"\"Returns a new builder that takes this configuration\n as its starting point.\n \"\"\"\n pass # pragma: no cover\n\n @staticmethod\n @abstractmethod\n def from_proto(msg):\n \"\"\"Creates a Config from the specificed protobuf message\n TODO: Allow loading from file uri or dict\n \"\"\"\n pass # pragma: no cover\n\n\nclass ConfigBuilder(ABC):\n def __init__(self, config_class, config=None):\n \"\"\"Construct a builder.\n\n Args:\n config_class: The Config class that this builder builds.\n config: A dictionary of **kwargs that will eventually be passed\n into the __init__ method of config_class to build the configuration.\n This config is modified with the fluent builder methods.\n \"\"\"\n if config is None: # pragma: no cover\n config = {}\n\n self.config_class = config_class\n self.config = config\n\n def build(self):\n \"\"\"Returns the configuration that is built by this builder.\n \"\"\"\n self.validate()\n arguments = set(inspect.getargspec(self.config_class).args)\n keys = set(self.config.keys())\n config = {k: self.config[k] for k in (arguments & keys)}\n return self.config_class(**config)\n\n def validate(self):\n \"\"\"Validate this config, if there is validation on the builder that\n is not captured by the required arguments of the config.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def from_proto(self, msg):\n \"\"\"Return a builder that takes the configuration from the proto message\n as its starting point.\n \"\"\"\n pass # pragma: no cover\n\n\nclass BundledConfigMixin(ABC):\n \"\"\"Mixin for configurations that participate in the bundling of a\n prediction package\"\"\"\n\n @abstractmethod\n def save_bundle_files(self, bundle_dir):\n \"\"\"Place files into a bundle directory for bundling into\n a prediction package.\n\n Returns: A tuple of (config, uris) of the modified configuration\n with the basenames of URIs in place of the original URIs,\n and a list of URIs that are to be bundled.\n \"\"\"\n pass # pragma: no cover\n\n def bundle_file(self, uri, bundle_dir):\n local_path = download_or_copy(uri, bundle_dir)\n base_name = os.path.basename(local_path)\n return (local_path, base_name)\n\n @abstractmethod\n def load_bundle_files(self, bundle_dir):\n \"\"\"Load files from a prediction package bundle directory.\"\"\"\n pass # pragma: no cover\n", "path": "rastervision/core/config.py"}], "after_files": [{"content": "from abc import (ABC, abstractmethod)\nimport os\nimport inspect\n\nfrom rastervision.utils.files import download_or_copy\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass Config(ABC):\n @abstractmethod\n def to_builder(self):\n \"\"\"Return a builder based on this config.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def to_proto(self):\n \"\"\"Returns the protobuf configuration for this config.\n \"\"\"\n pass # pragma: no cover\n\n def update_for_command(self,\n command_type,\n experiment_config,\n context=None,\n 
io_def=None):\n \"\"\"Updates this configuration for the given command\n\n Note: While configuration is immutable for client facing operations,\n this is an internal operation and mutates the configuration.\n\n Args:\n command_type: The command type that is currently being\n preprocessed. experiment_config: The experiment configuration\n that this configuration is a part of.\n context: Optional list of parent configurations, to allow for child\n configurations contained in collections to understand their\n context in the experiment configuration.\n\n Returns:\n Nothing. Call should mutate the configuration object itself.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def report_io(self, command_type, io_def):\n \"\"\"Updates the given CommandIODefinition.\n\n So that it includes the inputs, outputs, and missing files for this\n configuration at this command.\n\n Args:\n command_type: The command type that is currently being preprocessed.\n io_def: The CommandIODefinition that this call should modify.\n\n Returns: Nothing. This call should make the appropriate calls to the\n given io_def to mutate its state.\n \"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def builder():\n \"\"\"Returns a new builder that takes this configuration\n as its starting point.\n \"\"\"\n pass # pragma: no cover\n\n @staticmethod\n @abstractmethod\n def from_proto(msg):\n \"\"\"Creates a Config from the specificed protobuf message\n TODO: Allow loading from file uri or dict\n \"\"\"\n pass # pragma: no cover\n\n\nclass ConfigBuilder(ABC):\n def __init__(self, config_class, config=None):\n \"\"\"Construct a builder.\n\n Args:\n config_class: The Config class that this builder builds.\n config: A dictionary of **kwargs that will eventually be passed\n into the __init__ method of config_class to build the configuration.\n This config is modified with the fluent builder methods.\n \"\"\"\n if config is None: # pragma: no cover\n config = {}\n\n self.config_class = config_class\n self.config = config\n\n def build(self):\n \"\"\"Returns the configuration that is built by this builder.\n \"\"\"\n self.validate()\n arguments = set(inspect.getfullargspec(self.config_class).args)\n keys = set(self.config.keys())\n config = {k: self.config[k] for k in (arguments & keys)}\n return self.config_class(**config)\n\n def validate(self):\n \"\"\"Validate this config, if there is validation on the builder that\n is not captured by the required arguments of the config.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def from_proto(self, msg):\n \"\"\"Return a builder that takes the configuration from the proto message\n as its starting point.\n \"\"\"\n pass # pragma: no cover\n\n\nclass BundledConfigMixin(ABC):\n \"\"\"Mixin for configurations that participate in the bundling of a\n prediction package\"\"\"\n\n @abstractmethod\n def save_bundle_files(self, bundle_dir):\n \"\"\"Place files into a bundle directory for bundling into\n a prediction package.\n\n Returns: A tuple of (config, uris) of the modified configuration\n with the basenames of URIs in place of the original URIs,\n and a list of URIs that are to be bundled.\n \"\"\"\n pass # pragma: no cover\n\n def bundle_file(self, uri, bundle_dir):\n local_path = download_or_copy(uri, bundle_dir)\n base_name = os.path.basename(local_path)\n return (local_path, base_name)\n\n @abstractmethod\n def load_bundle_files(self, bundle_dir):\n \"\"\"Load files from a prediction package bundle directory.\"\"\"\n pass # pragma: no cover\n", "path": 
"rastervision/core/config.py"}]} | 1,981 | 137 |
gh_patches_debug_33223 | rasdani/github-patches | git_diff | lra__mackup-1292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mojave, new workstation
Hi, I'm on a new workstation with Dropbox installed. I installed mackup with pip and ran 'mackup restore' and got this:
Traceback (most recent call last):
File "/usr/local/bin/mackup", line 9, in <module>
load_entry_point('mackup==0.8.20', 'console_scripts', 'mackup')()
File "/Library/Python/2.7/site-packages/mackup/main.py", line 65, in main
app_db = ApplicationsDatabase()
File "/Library/Python/2.7/site-packages/mackup/appsdb.py", line 63, in __init__
.format(xdg_config_home))
ValueError: $XDG_CONFIG_HOME: /Users/stephens/.config does not exist
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mackup/appsdb.py`
Content:
```
1 """
2 The applications database.
3
4 The Applications Database provides an easy to use interface to load application
5 data from the Mackup Database (files).
6 """
7 import os
8
9 try:
10 import configparser
11 except ImportError:
12 import ConfigParser as configparser
13
14
15 from .constants import APPS_DIR
16 from .constants import CUSTOM_APPS_DIR
17
18
19 class ApplicationsDatabase(object):
20
21 """Database containing all the configured applications."""
22
23 def __init__(self):
24 """Create a ApplicationsDatabase instance."""
25 # Build the dict that will contain the properties of each application
26 self.apps = dict()
27
28 for config_file in ApplicationsDatabase.get_config_files():
29 config = configparser.SafeConfigParser(allow_no_value=True)
30
31 # Needed to not lowercase the configuration_files in the ini files
32 config.optionxform = str
33
34 if config.read(config_file):
35 # Get the filename without the directory name
36 filename = os.path.basename(config_file)
37 # The app name is the cfg filename with the extension
38 app_name = filename[:-len('.cfg')]
39
40 # Start building a dict for this app
41 self.apps[app_name] = dict()
42
43 # Add the fancy name for the app, for display purpose
44 app_pretty_name = config.get('application', 'name')
45 self.apps[app_name]['name'] = app_pretty_name
46
47 # Add the configuration files to sync
48 self.apps[app_name]['configuration_files'] = set()
49 if config.has_section('configuration_files'):
50 for path in config.options('configuration_files'):
51 if path.startswith('/'):
52 raise ValueError('Unsupported absolute path: {}'
53 .format(path))
54 self.apps[app_name]['configuration_files'].add(path)
55
56 # Add the XDG configuration files to sync
57 home = os.path.expanduser('~/')
58 failobj = "{}.config".format(home)
59 xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj)
60 if xdg_config_home:
61 if not os.path.exists(xdg_config_home):
62 raise ValueError('$XDG_CONFIG_HOME: {} does not exist'
63 .format(xdg_config_home))
64 if not xdg_config_home.startswith(home):
65 raise ValueError('$XDG_CONFIG_HOME: {} must be '
66 'somewhere within your home '
67 'directory: {}'
68 .format(xdg_config_home, home))
69 if config.has_section('xdg_configuration_files'):
70 for path in config.options('xdg_configuration_files'):
71 if path.startswith('/'):
72 raise ValueError('Unsupported absolute path: '
73 '{}'
74 .format(path))
75 path = os.path.join(xdg_config_home, path)
76 path = path.replace(home, '')
77 (self.apps[app_name]['configuration_files']
78 .add(path))
79
80 @staticmethod
81 def get_config_files():
82 """
83 Return the application configuration files.
84
85 Return a list of configuration files describing the apps supported by
86 Mackup. The files return are absolute full path to those files.
87 e.g. /usr/lib/mackup/applications/bash.cfg
88
89 Only one config file per application should be returned, custom config
90 having a priority over stock config.
91
92 Returns:
93 set of strings.
94 """
95 # Configure the config parser
96 apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
97 APPS_DIR)
98 custom_apps_dir = os.path.join(os.environ['HOME'], CUSTOM_APPS_DIR)
99
100 # List of stock application config files
101 config_files = set()
102
103 # Temp list of user added app config file names
104 custom_files = set()
105
106 # Get the list of custom application config files first
107 if os.path.isdir(custom_apps_dir):
108 for filename in os.listdir(custom_apps_dir):
109 if filename.endswith('.cfg'):
110 config_files.add(os.path.join(custom_apps_dir,
111 filename))
112 # Also add it to the set of custom apps, so that we don't
113 # add the stock config for the same app too
114 custom_files.add(filename)
115
116 # Add the default provided app config files, but only if those are not
117 # customized, as we don't want to overwrite custom app config.
118 for filename in os.listdir(apps_dir):
119 if filename.endswith('.cfg') and filename not in custom_files:
120 config_files.add(os.path.join(apps_dir, filename))
121
122 return config_files
123
124 def get_name(self, name):
125 """
126 Return the fancy name of an application.
127
128 Args:
129 name (str)
130
131 Returns:
132 str
133 """
134 return self.apps[name]['name']
135
136 def get_files(self, name):
137 """
138 Return the list of config files of an application.
139
140 Args:
141 name (str)
142
143 Returns:
144 set of str.
145 """
146 return self.apps[name]['configuration_files']
147
148 def get_app_names(self):
149 """
150 Return application names.
151
152 Return the list of application names that are available in the
153 database.
154
155 Returns:
156 set of str.
157 """
158 app_names = set()
159 for name in self.apps:
160 app_names.add(name)
161
162 return app_names
163
164 def get_pretty_app_names(self):
165 """
166 Return the list of pretty app names that are available in the database.
167
168 Returns:
169 set of str.
170 """
171 pretty_app_names = set()
172 for app_name in self.get_app_names():
173 pretty_app_names.add(self.get_name(app_name))
174
175 return pretty_app_names
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mackup/appsdb.py b/mackup/appsdb.py
--- a/mackup/appsdb.py
+++ b/mackup/appsdb.py
@@ -57,25 +57,21 @@
home = os.path.expanduser('~/')
failobj = "{}.config".format(home)
xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj)
- if xdg_config_home:
- if not os.path.exists(xdg_config_home):
- raise ValueError('$XDG_CONFIG_HOME: {} does not exist'
- .format(xdg_config_home))
- if not xdg_config_home.startswith(home):
- raise ValueError('$XDG_CONFIG_HOME: {} must be '
- 'somewhere within your home '
- 'directory: {}'
- .format(xdg_config_home, home))
- if config.has_section('xdg_configuration_files'):
- for path in config.options('xdg_configuration_files'):
- if path.startswith('/'):
- raise ValueError('Unsupported absolute path: '
- '{}'
- .format(path))
- path = os.path.join(xdg_config_home, path)
- path = path.replace(home, '')
- (self.apps[app_name]['configuration_files']
- .add(path))
+ if not xdg_config_home.startswith(home):
+ raise ValueError('$XDG_CONFIG_HOME: {} must be '
+ 'somewhere within your home '
+ 'directory: {}'
+ .format(xdg_config_home, home))
+ if config.has_section('xdg_configuration_files'):
+ for path in config.options('xdg_configuration_files'):
+ if path.startswith('/'):
+ raise ValueError('Unsupported absolute path: '
+ '{}'
+ .format(path))
+ path = os.path.join(xdg_config_home, path)
+ path = path.replace(home, '')
+ (self.apps[app_name]['configuration_files']
+ .add(path))
@staticmethod
def get_config_files():
| {"golden_diff": "diff --git a/mackup/appsdb.py b/mackup/appsdb.py\n--- a/mackup/appsdb.py\n+++ b/mackup/appsdb.py\n@@ -57,25 +57,21 @@\n home = os.path.expanduser('~/')\n failobj = \"{}.config\".format(home)\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj)\n- if xdg_config_home:\n- if not os.path.exists(xdg_config_home):\n- raise ValueError('$XDG_CONFIG_HOME: {} does not exist'\n- .format(xdg_config_home))\n- if not xdg_config_home.startswith(home):\n- raise ValueError('$XDG_CONFIG_HOME: {} must be '\n- 'somewhere within your home '\n- 'directory: {}'\n- .format(xdg_config_home, home))\n- if config.has_section('xdg_configuration_files'):\n- for path in config.options('xdg_configuration_files'):\n- if path.startswith('/'):\n- raise ValueError('Unsupported absolute path: '\n- '{}'\n- .format(path))\n- path = os.path.join(xdg_config_home, path)\n- path = path.replace(home, '')\n- (self.apps[app_name]['configuration_files']\n- .add(path))\n+ if not xdg_config_home.startswith(home):\n+ raise ValueError('$XDG_CONFIG_HOME: {} must be '\n+ 'somewhere within your home '\n+ 'directory: {}'\n+ .format(xdg_config_home, home))\n+ if config.has_section('xdg_configuration_files'):\n+ for path in config.options('xdg_configuration_files'):\n+ if path.startswith('/'):\n+ raise ValueError('Unsupported absolute path: '\n+ '{}'\n+ .format(path))\n+ path = os.path.join(xdg_config_home, path)\n+ path = path.replace(home, '')\n+ (self.apps[app_name]['configuration_files']\n+ .add(path))\n \n @staticmethod\n def get_config_files():\n", "issue": "Mojave, new workstation\nHi, I'm on a new workstation with Dropbox installed. I installed mackup with pip and ran 'mackup restore' and got this:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/mackup\", line 9, in <module>\r\n load_entry_point('mackup==0.8.20', 'console_scripts', 'mackup')()\r\n File \"/Library/Python/2.7/site-packages/mackup/main.py\", line 65, in main\r\n app_db = ApplicationsDatabase()\r\n File \"/Library/Python/2.7/site-packages/mackup/appsdb.py\", line 63, in __init__\r\n .format(xdg_config_home))\r\nValueError: $XDG_CONFIG_HOME: /Users/stephens/.config does not exist\n", "before_files": [{"content": "\"\"\"\nThe applications database.\n\nThe Applications Database provides an easy to use interface to load application\ndata from the Mackup Database (files).\n\"\"\"\nimport os\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\n\nfrom .constants import APPS_DIR\nfrom .constants import CUSTOM_APPS_DIR\n\n\nclass ApplicationsDatabase(object):\n\n \"\"\"Database containing all the configured applications.\"\"\"\n\n def __init__(self):\n \"\"\"Create a ApplicationsDatabase instance.\"\"\"\n # Build the dict that will contain the properties of each application\n self.apps = dict()\n\n for config_file in ApplicationsDatabase.get_config_files():\n config = configparser.SafeConfigParser(allow_no_value=True)\n\n # Needed to not lowercase the configuration_files in the ini files\n config.optionxform = str\n\n if config.read(config_file):\n # Get the filename without the directory name\n filename = os.path.basename(config_file)\n # The app name is the cfg filename with the extension\n app_name = filename[:-len('.cfg')]\n\n # Start building a dict for this app\n self.apps[app_name] = dict()\n\n # Add the fancy name for the app, for display purpose\n app_pretty_name = config.get('application', 'name')\n self.apps[app_name]['name'] = app_pretty_name\n\n # Add the configuration files to sync\n 
self.apps[app_name]['configuration_files'] = set()\n if config.has_section('configuration_files'):\n for path in config.options('configuration_files'):\n if path.startswith('/'):\n raise ValueError('Unsupported absolute path: {}'\n .format(path))\n self.apps[app_name]['configuration_files'].add(path)\n\n # Add the XDG configuration files to sync\n home = os.path.expanduser('~/')\n failobj = \"{}.config\".format(home)\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj)\n if xdg_config_home:\n if not os.path.exists(xdg_config_home):\n raise ValueError('$XDG_CONFIG_HOME: {} does not exist'\n .format(xdg_config_home))\n if not xdg_config_home.startswith(home):\n raise ValueError('$XDG_CONFIG_HOME: {} must be '\n 'somewhere within your home '\n 'directory: {}'\n .format(xdg_config_home, home))\n if config.has_section('xdg_configuration_files'):\n for path in config.options('xdg_configuration_files'):\n if path.startswith('/'):\n raise ValueError('Unsupported absolute path: '\n '{}'\n .format(path))\n path = os.path.join(xdg_config_home, path)\n path = path.replace(home, '')\n (self.apps[app_name]['configuration_files']\n .add(path))\n\n @staticmethod\n def get_config_files():\n \"\"\"\n Return the application configuration files.\n\n Return a list of configuration files describing the apps supported by\n Mackup. The files return are absolute full path to those files.\n e.g. /usr/lib/mackup/applications/bash.cfg\n\n Only one config file per application should be returned, custom config\n having a priority over stock config.\n\n Returns:\n set of strings.\n \"\"\"\n # Configure the config parser\n apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n APPS_DIR)\n custom_apps_dir = os.path.join(os.environ['HOME'], CUSTOM_APPS_DIR)\n\n # List of stock application config files\n config_files = set()\n\n # Temp list of user added app config file names\n custom_files = set()\n\n # Get the list of custom application config files first\n if os.path.isdir(custom_apps_dir):\n for filename in os.listdir(custom_apps_dir):\n if filename.endswith('.cfg'):\n config_files.add(os.path.join(custom_apps_dir,\n filename))\n # Also add it to the set of custom apps, so that we don't\n # add the stock config for the same app too\n custom_files.add(filename)\n\n # Add the default provided app config files, but only if those are not\n # customized, as we don't want to overwrite custom app config.\n for filename in os.listdir(apps_dir):\n if filename.endswith('.cfg') and filename not in custom_files:\n config_files.add(os.path.join(apps_dir, filename))\n\n return config_files\n\n def get_name(self, name):\n \"\"\"\n Return the fancy name of an application.\n\n Args:\n name (str)\n\n Returns:\n str\n \"\"\"\n return self.apps[name]['name']\n\n def get_files(self, name):\n \"\"\"\n Return the list of config files of an application.\n\n Args:\n name (str)\n\n Returns:\n set of str.\n \"\"\"\n return self.apps[name]['configuration_files']\n\n def get_app_names(self):\n \"\"\"\n Return application names.\n\n Return the list of application names that are available in the\n database.\n\n Returns:\n set of str.\n \"\"\"\n app_names = set()\n for name in self.apps:\n app_names.add(name)\n\n return app_names\n\n def get_pretty_app_names(self):\n \"\"\"\n Return the list of pretty app names that are available in the database.\n\n Returns:\n set of str.\n \"\"\"\n pretty_app_names = set()\n for app_name in self.get_app_names():\n pretty_app_names.add(self.get_name(app_name))\n\n return pretty_app_names\n", 
"path": "mackup/appsdb.py"}], "after_files": [{"content": "\"\"\"\nThe applications database.\n\nThe Applications Database provides an easy to use interface to load application\ndata from the Mackup Database (files).\n\"\"\"\nimport os\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\n\nfrom .constants import APPS_DIR\nfrom .constants import CUSTOM_APPS_DIR\n\n\nclass ApplicationsDatabase(object):\n\n \"\"\"Database containing all the configured applications.\"\"\"\n\n def __init__(self):\n \"\"\"Create a ApplicationsDatabase instance.\"\"\"\n # Build the dict that will contain the properties of each application\n self.apps = dict()\n\n for config_file in ApplicationsDatabase.get_config_files():\n config = configparser.SafeConfigParser(allow_no_value=True)\n\n # Needed to not lowercase the configuration_files in the ini files\n config.optionxform = str\n\n if config.read(config_file):\n # Get the filename without the directory name\n filename = os.path.basename(config_file)\n # The app name is the cfg filename with the extension\n app_name = filename[:-len('.cfg')]\n\n # Start building a dict for this app\n self.apps[app_name] = dict()\n\n # Add the fancy name for the app, for display purpose\n app_pretty_name = config.get('application', 'name')\n self.apps[app_name]['name'] = app_pretty_name\n\n # Add the configuration files to sync\n self.apps[app_name]['configuration_files'] = set()\n if config.has_section('configuration_files'):\n for path in config.options('configuration_files'):\n if path.startswith('/'):\n raise ValueError('Unsupported absolute path: {}'\n .format(path))\n self.apps[app_name]['configuration_files'].add(path)\n\n # Add the XDG configuration files to sync\n home = os.path.expanduser('~/')\n failobj = \"{}.config\".format(home)\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj)\n if not xdg_config_home.startswith(home):\n raise ValueError('$XDG_CONFIG_HOME: {} must be '\n 'somewhere within your home '\n 'directory: {}'\n .format(xdg_config_home, home))\n if config.has_section('xdg_configuration_files'):\n for path in config.options('xdg_configuration_files'):\n if path.startswith('/'):\n raise ValueError('Unsupported absolute path: '\n '{}'\n .format(path))\n path = os.path.join(xdg_config_home, path)\n path = path.replace(home, '')\n (self.apps[app_name]['configuration_files']\n .add(path))\n\n @staticmethod\n def get_config_files():\n \"\"\"\n Return the application configuration files.\n\n Return a list of configuration files describing the apps supported by\n Mackup. The files return are absolute full path to those files.\n e.g. 
/usr/lib/mackup/applications/bash.cfg\n\n Only one config file per application should be returned, custom config\n having a priority over stock config.\n\n Returns:\n set of strings.\n \"\"\"\n # Configure the config parser\n apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n APPS_DIR)\n custom_apps_dir = os.path.join(os.environ['HOME'], CUSTOM_APPS_DIR)\n\n # List of stock application config files\n config_files = set()\n\n # Temp list of user added app config file names\n custom_files = set()\n\n # Get the list of custom application config files first\n if os.path.isdir(custom_apps_dir):\n for filename in os.listdir(custom_apps_dir):\n if filename.endswith('.cfg'):\n config_files.add(os.path.join(custom_apps_dir,\n filename))\n # Also add it to the set of custom apps, so that we don't\n # add the stock config for the same app too\n custom_files.add(filename)\n\n # Add the default provided app config files, but only if those are not\n # customized, as we don't want to overwrite custom app config.\n for filename in os.listdir(apps_dir):\n if filename.endswith('.cfg') and filename not in custom_files:\n config_files.add(os.path.join(apps_dir, filename))\n\n return config_files\n\n def get_name(self, name):\n \"\"\"\n Return the fancy name of an application.\n\n Args:\n name (str)\n\n Returns:\n str\n \"\"\"\n return self.apps[name]['name']\n\n def get_files(self, name):\n \"\"\"\n Return the list of config files of an application.\n\n Args:\n name (str)\n\n Returns:\n set of str.\n \"\"\"\n return self.apps[name]['configuration_files']\n\n def get_app_names(self):\n \"\"\"\n Return application names.\n\n Return the list of application names that are available in the\n database.\n\n Returns:\n set of str.\n \"\"\"\n app_names = set()\n for name in self.apps:\n app_names.add(name)\n\n return app_names\n\n def get_pretty_app_names(self):\n \"\"\"\n Return the list of pretty app names that are available in the database.\n\n Returns:\n set of str.\n \"\"\"\n pretty_app_names = set()\n for app_name in self.get_app_names():\n pretty_app_names.add(self.get_name(app_name))\n\n return pretty_app_names\n", "path": "mackup/appsdb.py"}]} | 2,023 | 434 |
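A short illustrative sketch, not drawn from the record above: the patch drops the `os.path.exists` check so that a fresh machine without a `~/.config` directory no longer aborts. The snippet below only restates the post-patch resolution logic using the standard library; it is a sketch, not the project's code.

```python
import os

home = os.path.expanduser('~/')
xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '{}.config'.format(home))

# A missing directory is tolerated (there is simply nothing to sync yet);
# only a location outside the home directory is still rejected.
if not xdg_config_home.startswith(home):
    raise ValueError('$XDG_CONFIG_HOME: {} must be somewhere within your '
                     'home directory: {}'.format(xdg_config_home, home))
print(xdg_config_home)
```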
gh_patches_debug_102 | rasdani/github-patches | git_diff | scipy__scipy-17210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Build failure due to problems with shebang line in cythoner.py
I ran into a problem running `dev.py` that appears to be caused by the shebang line `#!python3` in the file `scipy/_build_utils/cythoner.py`. If I change it to `#!/usr/bin/env python` then the build works fine.
Most files in scipy with a shebang line use `#!/usr/bin/env python`. Only files in the `_build_utils` use `#!python3`.
Error message when running `python dev.py build`:
```shell
Meson build setup OK
💻 ninja -C /mnt/c/Users/Jozsef/OSS/scipy-test/build
ninja: Entering directory `/mnt/c/Users/Jozsef/OSS/scipy-test/build'
[3/1562] Generating 'scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c'.
FAILED: scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c
/mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py ../scipy/_lib/_ccallback_c.pyx scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c
/bin/sh: 1: /mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py: not found
[12/1562] Compiling C++ object scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so.p/_uarray_dispatch.cxx.o
ninja: build stopped: subcommand failed.
Build failed!
```
If I try running `cythoner.py` directly:
```shell
-bash: /mnt/c/Users/Jozsef/OSS/scipy-conda/scipy/_build_utils/cythoner.py: python3: bad interpreter: No such file or directory
```
I'm using conda with WSL (Ubuntu).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy/_build_utils/cythoner.py`
Content:
```
1 #!python3
2 """ Scipy variant of Cython command
3
4 Cython, as applied to single pyx file.
5
6 Expects two arguments, infile and outfile.
7
8 Other options passed through to cython command line parser.
9 """
10
11 import os
12 import os.path as op
13 import sys
14 import subprocess as sbp
15
16
17 def main():
18 in_fname, out_fname = (op.abspath(p) for p in sys.argv[1:3])
19
20 sbp.run(['cython', '-3', '--fast-fail',
21 '--output-file', out_fname,
22 '--include-dir', os.getcwd()] +
23 sys.argv[3:] + [in_fname],
24 check=True)
25
26
27 if __name__ == '__main__':
28 main()
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scipy/_build_utils/cythoner.py b/scipy/_build_utils/cythoner.py
--- a/scipy/_build_utils/cythoner.py
+++ b/scipy/_build_utils/cythoner.py
@@ -1,4 +1,4 @@
-#!python3
+#!/usr/bin/env python3
""" Scipy variant of Cython command
Cython, as applied to single pyx file.
| {"golden_diff": "diff --git a/scipy/_build_utils/cythoner.py b/scipy/_build_utils/cythoner.py\n--- a/scipy/_build_utils/cythoner.py\n+++ b/scipy/_build_utils/cythoner.py\n@@ -1,4 +1,4 @@\n-#!python3\n+#!/usr/bin/env python3\n \"\"\" Scipy variant of Cython command\n \n Cython, as applied to single pyx file.\n", "issue": "BUG: Build failure due to problems with shebang line in cythoner.py\nI ran into a problem running `dev.py` that appears to be caused by the shebang line `#!python3` in the file `scipy/_build_utils/cythoner.py`. If I change it to `#!/usr/bin/env python` then the build works fine.\r\n\r\nMost files in scipy with a shebang line use `#!/usr/bin/env python`. Only files in the `_build_utils` use `#!python3`.\r\n\r\nError message when running `python dev.py build`:\r\n\r\n```shell\r\nMeson build setup OK\r\n\ud83d\udcbb ninja -C /mnt/c/Users/Jozsef/OSS/scipy-test/build\r\nninja: Entering directory `/mnt/c/Users/Jozsef/OSS/scipy-test/build'\r\n[3/1562] Generating 'scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c'.\r\nFAILED: scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c\r\n/mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py ../scipy/_lib/_ccallback_c.pyx scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c\r\n/bin/sh: 1: /mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py: not found\r\n[12/1562] Compiling C++ object scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so.p/_uarray_dispatch.cxx.o\r\nninja: build stopped: subcommand failed.\r\nBuild failed!\r\n```\r\n\r\nIf I try running `cythoner.py` directly:\r\n\r\n```shell\r\n-bash: /mnt/c/Users/Jozsef/OSS/scipy-conda/scipy/_build_utils/cythoner.py: python3: bad interpreter: No such file or directory\r\n```\r\n\r\nI'm using conda with WSL (Ubuntu).\n", "before_files": [{"content": "#!python3\n\"\"\" Scipy variant of Cython command\n\nCython, as applied to single pyx file.\n\nExpects two arguments, infile and outfile.\n\nOther options passed through to cython command line parser.\n\"\"\"\n\nimport os\nimport os.path as op\nimport sys\nimport subprocess as sbp\n\n\ndef main():\n in_fname, out_fname = (op.abspath(p) for p in sys.argv[1:3])\n\n sbp.run(['cython', '-3', '--fast-fail',\n '--output-file', out_fname,\n '--include-dir', os.getcwd()] +\n sys.argv[3:] + [in_fname],\n check=True)\n\n\nif __name__ == '__main__':\n main()\n", "path": "scipy/_build_utils/cythoner.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\" Scipy variant of Cython command\n\nCython, as applied to single pyx file.\n\nExpects two arguments, infile and outfile.\n\nOther options passed through to cython command line parser.\n\"\"\"\n\nimport os\nimport os.path as op\nimport sys\nimport subprocess as sbp\n\n\ndef main():\n in_fname, out_fname = (op.abspath(p) for p in sys.argv[1:3])\n\n sbp.run(['cython', '-3', '--fast-fail',\n '--output-file', out_fname,\n '--include-dir', os.getcwd()] +\n sys.argv[3:] + [in_fname],\n check=True)\n\n\nif __name__ == '__main__':\n main()\n", "path": "scipy/_build_utils/cythoner.py"}]} | 932 | 91 |
gh_patches_debug_28238 | rasdani/github-patches | git_diff | falconry__falcon-1785 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom media handlers: Unexpected issue when providing custom json handler
This is in falcon-2.0
Look at the documentation [here][1] for using rapidjson for encoding/decoding json. By providing:
`extra_handlers={'application/json': json_handler}` we are still left with the default handler for content-type `application/json; charset=UTF-8`. This results in an unexpected behaviour when some client library (e.g. Retrofit for Android) includes the charset in the header. 
While the documentation should be updated, the expected behaviour is that if the handler for `application/json` is updated - it should also update the handler for variant with charset (or at least throw a warning) otherwise there is a possibility of hidden bugs.
[1]: https://falcon.readthedocs.io/en/stable/api/media.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/media/handlers.py`
Content:
```
1 from collections import UserDict
2
3 from falcon import errors
4 from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED
5 from falcon.media.json import JSONHandler
6 from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions
7 from falcon.media.urlencoded import URLEncodedFormHandler
8 from falcon.vendor import mimeparse
9
10
11 class Handlers(UserDict):
12 """A :class:`dict`-like object that manages Internet media type handlers."""
13 def __init__(self, initial=None):
14 handlers = initial or {
15 'application/json': JSONHandler(),
16 'application/json; charset=UTF-8': JSONHandler(),
17 MEDIA_MULTIPART: MultipartFormHandler(),
18 MEDIA_URLENCODED: URLEncodedFormHandler(),
19 }
20
21 # NOTE(jmvrbanac): Directly calling UserDict as it's not inheritable.
22 # Also, this results in self.update(...) being called.
23 UserDict.__init__(self, handlers)
24
25 def _resolve_media_type(self, media_type, all_media_types):
26 resolved = None
27
28 try:
29 # NOTE(jmvrbanac): Mimeparse will return an empty string if it can
30 # parse the media type, but cannot find a suitable type.
31 resolved = mimeparse.best_match(
32 all_media_types,
33 media_type
34 )
35 except ValueError:
36 pass
37
38 return resolved
39
40 def find_by_media_type(self, media_type, default):
41 # PERF(jmvrbanac): Check via a quick methods first for performance
42 if media_type == '*/*' or not media_type:
43 media_type = default
44
45 try:
46 return self.data[media_type]
47 except KeyError:
48 pass
49
50 # PERF(jmvrbanac): Fallback to the slower method
51 resolved = self._resolve_media_type(media_type, self.data.keys())
52
53 if not resolved:
54 raise errors.HTTPUnsupportedMediaType(
55 description='{0} is an unsupported media type.'.format(media_type)
56 )
57
58 return self.data[resolved]
59
60
61 # NOTE(vytas): An ugly way to work around circular imports.
62 MultipartParseOptions._DEFAULT_HANDLERS = Handlers({
63 'application/json': JSONHandler(),
64 'application/json; charset=UTF-8': JSONHandler(),
65 MEDIA_URLENCODED: URLEncodedFormHandler(),
66 }) # type: ignore
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/media/handlers.py b/falcon/media/handlers.py
--- a/falcon/media/handlers.py
+++ b/falcon/media/handlers.py
@@ -1,7 +1,7 @@
from collections import UserDict
from falcon import errors
-from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED
+from falcon.constants import MEDIA_JSON, MEDIA_MULTIPART, MEDIA_URLENCODED
from falcon.media.json import JSONHandler
from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions
from falcon.media.urlencoded import URLEncodedFormHandler
@@ -12,8 +12,7 @@
"""A :class:`dict`-like object that manages Internet media type handlers."""
def __init__(self, initial=None):
handlers = initial or {
- 'application/json': JSONHandler(),
- 'application/json; charset=UTF-8': JSONHandler(),
+ MEDIA_JSON: JSONHandler(),
MEDIA_MULTIPART: MultipartFormHandler(),
MEDIA_URLENCODED: URLEncodedFormHandler(),
}
@@ -60,7 +59,6 @@
# NOTE(vytas): An ugly way to work around circular imports.
MultipartParseOptions._DEFAULT_HANDLERS = Handlers({
- 'application/json': JSONHandler(),
- 'application/json; charset=UTF-8': JSONHandler(),
+ MEDIA_JSON: JSONHandler(),
MEDIA_URLENCODED: URLEncodedFormHandler(),
}) # type: ignore
| {"golden_diff": "diff --git a/falcon/media/handlers.py b/falcon/media/handlers.py\n--- a/falcon/media/handlers.py\n+++ b/falcon/media/handlers.py\n@@ -1,7 +1,7 @@\n from collections import UserDict\n \n from falcon import errors\n-from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED\n+from falcon.constants import MEDIA_JSON, MEDIA_MULTIPART, MEDIA_URLENCODED\n from falcon.media.json import JSONHandler\n from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions\n from falcon.media.urlencoded import URLEncodedFormHandler\n@@ -12,8 +12,7 @@\n \"\"\"A :class:`dict`-like object that manages Internet media type handlers.\"\"\"\n def __init__(self, initial=None):\n handlers = initial or {\n- 'application/json': JSONHandler(),\n- 'application/json; charset=UTF-8': JSONHandler(),\n+ MEDIA_JSON: JSONHandler(),\n MEDIA_MULTIPART: MultipartFormHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }\n@@ -60,7 +59,6 @@\n \n # NOTE(vytas): An ugly way to work around circular imports.\n MultipartParseOptions._DEFAULT_HANDLERS = Handlers({\n- 'application/json': JSONHandler(),\n- 'application/json; charset=UTF-8': JSONHandler(),\n+ MEDIA_JSON: JSONHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }) # type: ignore\n", "issue": "Custom media handlers: Unexpected issue when providing custom json handler\nThis is in falcon-2.0\r\n\r\nLook at the documentation [here][1] for using rapidjson for encoding/decoding json. By providing:\r\n\r\n`extra_handlers={'application/json': json_handler}` we are still left with the default handler for content-type `application-json; charset=UTF-8`. This results in an unexpected behaviour when some client library (e.g. Retrofit for Android) includes the charset in the header. \r\n\r\nWhile the documentation should be updated, the expected behaviour is that if the handler for `application/json` is updated - it should also update the handler for variant with charset (or at least throw a warning) otherwise there is a possibility of hidden bugs. \r\n\r\n[1]: https://falcon.readthedocs.io/en/stable/api/media.html\n", "before_files": [{"content": "from collections import UserDict\n\nfrom falcon import errors\nfrom falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED\nfrom falcon.media.json import JSONHandler\nfrom falcon.media.multipart import MultipartFormHandler, MultipartParseOptions\nfrom falcon.media.urlencoded import URLEncodedFormHandler\nfrom falcon.vendor import mimeparse\n\n\nclass Handlers(UserDict):\n \"\"\"A :class:`dict`-like object that manages Internet media type handlers.\"\"\"\n def __init__(self, initial=None):\n handlers = initial or {\n 'application/json': JSONHandler(),\n 'application/json; charset=UTF-8': JSONHandler(),\n MEDIA_MULTIPART: MultipartFormHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }\n\n # NOTE(jmvrbanac): Directly calling UserDict as it's not inheritable.\n # Also, this results in self.update(...) 
being called.\n UserDict.__init__(self, handlers)\n\n def _resolve_media_type(self, media_type, all_media_types):\n resolved = None\n\n try:\n # NOTE(jmvrbanac): Mimeparse will return an empty string if it can\n # parse the media type, but cannot find a suitable type.\n resolved = mimeparse.best_match(\n all_media_types,\n media_type\n )\n except ValueError:\n pass\n\n return resolved\n\n def find_by_media_type(self, media_type, default):\n # PERF(jmvrbanac): Check via a quick methods first for performance\n if media_type == '*/*' or not media_type:\n media_type = default\n\n try:\n return self.data[media_type]\n except KeyError:\n pass\n\n # PERF(jmvrbanac): Fallback to the slower method\n resolved = self._resolve_media_type(media_type, self.data.keys())\n\n if not resolved:\n raise errors.HTTPUnsupportedMediaType(\n description='{0} is an unsupported media type.'.format(media_type)\n )\n\n return self.data[resolved]\n\n\n# NOTE(vytas): An ugly way to work around circular imports.\nMultipartParseOptions._DEFAULT_HANDLERS = Handlers({\n 'application/json': JSONHandler(),\n 'application/json; charset=UTF-8': JSONHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n}) # type: ignore\n", "path": "falcon/media/handlers.py"}], "after_files": [{"content": "from collections import UserDict\n\nfrom falcon import errors\nfrom falcon.constants import MEDIA_JSON, MEDIA_MULTIPART, MEDIA_URLENCODED\nfrom falcon.media.json import JSONHandler\nfrom falcon.media.multipart import MultipartFormHandler, MultipartParseOptions\nfrom falcon.media.urlencoded import URLEncodedFormHandler\nfrom falcon.vendor import mimeparse\n\n\nclass Handlers(UserDict):\n \"\"\"A :class:`dict`-like object that manages Internet media type handlers.\"\"\"\n def __init__(self, initial=None):\n handlers = initial or {\n MEDIA_JSON: JSONHandler(),\n MEDIA_MULTIPART: MultipartFormHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }\n\n # NOTE(jmvrbanac): Directly calling UserDict as it's not inheritable.\n # Also, this results in self.update(...) being called.\n UserDict.__init__(self, handlers)\n\n def _resolve_media_type(self, media_type, all_media_types):\n resolved = None\n\n try:\n # NOTE(jmvrbanac): Mimeparse will return an empty string if it can\n # parse the media type, but cannot find a suitable type.\n resolved = mimeparse.best_match(\n all_media_types,\n media_type\n )\n except ValueError:\n pass\n\n return resolved\n\n def find_by_media_type(self, media_type, default):\n # PERF(jmvrbanac): Check via a quick methods first for performance\n if media_type == '*/*' or not media_type:\n media_type = default\n\n try:\n return self.data[media_type]\n except KeyError:\n pass\n\n # PERF(jmvrbanac): Fallback to the slower method\n resolved = self._resolve_media_type(media_type, self.data.keys())\n\n if not resolved:\n raise errors.HTTPUnsupportedMediaType(\n description='{0} is an unsupported media type.'.format(media_type)\n )\n\n return self.data[resolved]\n\n\n# NOTE(vytas): An ugly way to work around circular imports.\nMultipartParseOptions._DEFAULT_HANDLERS = Handlers({\n MEDIA_JSON: JSONHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n}) # type: ignore\n", "path": "falcon/media/handlers.py"}]} | 1,072 | 340 |
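A short illustrative sketch, not drawn from the record above: the patch can drop the duplicate `application/json; charset=UTF-8` entry because the handler lookup already falls back to `mimeparse.best_match`, which maps a charset-qualified request type onto the bare registered type. The snippet assumes the standalone `python-mimeparse` package (Falcon vendors its own copy as `falcon.vendor.mimeparse`), and the `registered` list is illustrative.

```python
import mimeparse

registered = ['application/json', 'multipart/form-data',
              'application/x-www-form-urlencoded']

# A charset-qualified Content-Type still resolves to the single
# registered JSON entry, so only one MEDIA_JSON key is needed.
match = mimeparse.best_match(registered, 'application/json; charset=UTF-8')
print(match)  # application/json
```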
gh_patches_debug_29595 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-7414 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log level `DEPRECATION` is documented but not working
## Description of the issue
Log level `DEPRECATION` is documented but not working.
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```5.7.0```
* Version of Python: 3.10.6
* Platform: Ubuntu 22.04
* How you installed Python: apt
* Did you also try this on another platform? Does it work there? yes, same thing
* try the latest development version, using the following command: yes, same thing
### A minimal example program which shows the error
```
$ pyinstaller --help | grep -U1 DEPREC
--log-level LEVEL Amount of detail in build-time console messages. LEVEL
may be one of TRACE, DEBUG, INFO, WARN, DEPRECATION,
ERROR, CRITICAL (default: INFO). Also settable via and
$ pyinstaller --log-level DEPRECATION .
[...]
pyinstaller: error: Unknown log level `DEPRECATION`
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/log.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2023, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11 """
12 Logging module for PyInstaller.
13 """
14
15 __all__ = ['getLogger', 'INFO', 'WARN', 'DEBUG', 'TRACE', 'ERROR', 'FATAL', 'DEPRECATION']
16
17 import os
18 import logging
19 from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger
20
21 TRACE = logging.TRACE = DEBUG - 5
22 logging.addLevelName(TRACE, 'TRACE')
23 DEPRECATION = WARN + 5
24 logging.addLevelName(DEPRECATION, 'DEPRECATION')
25 LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')
26
27 FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'
28 _env_level = os.environ.get("PYI_LOG_LEVEL", "INFO")
29 try:
30 level = getattr(logging, _env_level.upper())
31 except AttributeError:
32 raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.")
33 logging.basicConfig(format=FORMAT, level=level)
34 logger = getLogger('PyInstaller')
35
36
37 def __add_options(parser):
38 parser.add_argument(
39 '--log-level',
40 choices=LEVELS,
41 metavar="LEVEL",
42 dest='loglevel',
43 help='Amount of detail in build-time console messages. LEVEL may be one of %s (default: INFO). '
44 'Also settable via and overrides the PYI_LOG_LEVEL environment variable.' % ', '.join(LEVELS),
45 )
46
47
48 def __process_options(parser, opts):
49 if opts.loglevel:
50 try:
51 level = opts.loglevel.upper()
52 _level = getattr(logging, level)
53 except AttributeError:
54 parser.error('Unknown log level `%s`' % opts.loglevel)
55 logger.setLevel(_level)
56 os.environ["PYI_LOG_LEVEL"] = level
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/log.py b/PyInstaller/log.py
--- a/PyInstaller/log.py
+++ b/PyInstaller/log.py
@@ -18,18 +18,26 @@
import logging
from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger
-TRACE = logging.TRACE = DEBUG - 5
+TRACE = DEBUG - 5
logging.addLevelName(TRACE, 'TRACE')
DEPRECATION = WARN + 5
logging.addLevelName(DEPRECATION, 'DEPRECATION')
-LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')
+LEVELS = {
+ 'TRACE': TRACE,
+ 'DEBUG': DEBUG,
+ 'INFO': INFO,
+ 'WARN': WARN,
+ 'DEPRECATION': DEPRECATION,
+ 'ERROR': ERROR,
+ 'FATAL': FATAL,
+}
FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'
_env_level = os.environ.get("PYI_LOG_LEVEL", "INFO")
try:
- level = getattr(logging, _env_level.upper())
-except AttributeError:
- raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.")
+ level = LEVELS[_env_level.upper()]
+except KeyError:
+ raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {list(LEVELS)}.")
logging.basicConfig(format=FORMAT, level=level)
logger = getLogger('PyInstaller')
@@ -49,8 +57,8 @@
if opts.loglevel:
try:
level = opts.loglevel.upper()
- _level = getattr(logging, level)
- except AttributeError:
+ _level = LEVELS[level]
+ except KeyError:
parser.error('Unknown log level `%s`' % opts.loglevel)
logger.setLevel(_level)
os.environ["PYI_LOG_LEVEL"] = level
| {"golden_diff": "diff --git a/PyInstaller/log.py b/PyInstaller/log.py\n--- a/PyInstaller/log.py\n+++ b/PyInstaller/log.py\n@@ -18,18 +18,26 @@\n import logging\n from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger\n \n-TRACE = logging.TRACE = DEBUG - 5\n+TRACE = DEBUG - 5\n logging.addLevelName(TRACE, 'TRACE')\n DEPRECATION = WARN + 5\n logging.addLevelName(DEPRECATION, 'DEPRECATION')\n-LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')\n+LEVELS = {\n+ 'TRACE': TRACE,\n+ 'DEBUG': DEBUG,\n+ 'INFO': INFO,\n+ 'WARN': WARN,\n+ 'DEPRECATION': DEPRECATION,\n+ 'ERROR': ERROR,\n+ 'FATAL': FATAL,\n+}\n \n FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'\n _env_level = os.environ.get(\"PYI_LOG_LEVEL\", \"INFO\")\n try:\n- level = getattr(logging, _env_level.upper())\n-except AttributeError:\n- raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.\")\n+ level = LEVELS[_env_level.upper()]\n+except KeyError:\n+ raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {list(LEVELS)}.\")\n logging.basicConfig(format=FORMAT, level=level)\n logger = getLogger('PyInstaller')\n \n@@ -49,8 +57,8 @@\n if opts.loglevel:\n try:\n level = opts.loglevel.upper()\n- _level = getattr(logging, level)\n- except AttributeError:\n+ _level = LEVELS[level]\n+ except KeyError:\n parser.error('Unknown log level `%s`' % opts.loglevel)\n logger.setLevel(_level)\n os.environ[\"PYI_LOG_LEVEL\"] = level\n", "issue": "Log level `DEPRECATION` is documented but not working\n## Description of the issue\r\n\r\nLog level `DEPRECATION` is documented but not working.\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.7.0```\r\n* Version of Python: 3.10.6\r\n* Platform: Ubuntu 22.04\r\n* How you installed Python: apt\r\n* Did you also try this on another platform? Does it work there? yes, same thing\r\n* try the latest development version, using the following command: yes, same thing\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\n$ pyinstaller --help | grep -U1 DEPREC\r\n --log-level LEVEL Amount of detail in build-time console messages. LEVEL\r\n may be one of TRACE, DEBUG, INFO, WARN, DEPRECATION,\r\n ERROR, CRITICAL (default: INFO). 
Also settable via and\r\n$ pyinstaller --log-level DEPRECATION .\r\n[...]\r\npyinstaller: error: Unknown log level `DEPRECATION`\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\nLogging module for PyInstaller.\n\"\"\"\n\n__all__ = ['getLogger', 'INFO', 'WARN', 'DEBUG', 'TRACE', 'ERROR', 'FATAL', 'DEPRECATION']\n\nimport os\nimport logging\nfrom logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger\n\nTRACE = logging.TRACE = DEBUG - 5\nlogging.addLevelName(TRACE, 'TRACE')\nDEPRECATION = WARN + 5\nlogging.addLevelName(DEPRECATION, 'DEPRECATION')\nLEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')\n\nFORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'\n_env_level = os.environ.get(\"PYI_LOG_LEVEL\", \"INFO\")\ntry:\n level = getattr(logging, _env_level.upper())\nexcept AttributeError:\n raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.\")\nlogging.basicConfig(format=FORMAT, level=level)\nlogger = getLogger('PyInstaller')\n\n\ndef __add_options(parser):\n parser.add_argument(\n '--log-level',\n choices=LEVELS,\n metavar=\"LEVEL\",\n dest='loglevel',\n help='Amount of detail in build-time console messages. LEVEL may be one of %s (default: INFO). '\n 'Also settable via and overrides the PYI_LOG_LEVEL environment variable.' % ', '.join(LEVELS),\n )\n\n\ndef __process_options(parser, opts):\n if opts.loglevel:\n try:\n level = opts.loglevel.upper()\n _level = getattr(logging, level)\n except AttributeError:\n parser.error('Unknown log level `%s`' % opts.loglevel)\n logger.setLevel(_level)\n os.environ[\"PYI_LOG_LEVEL\"] = level\n", "path": "PyInstaller/log.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\nLogging module for PyInstaller.\n\"\"\"\n\n__all__ = ['getLogger', 'INFO', 'WARN', 'DEBUG', 'TRACE', 'ERROR', 'FATAL', 'DEPRECATION']\n\nimport os\nimport logging\nfrom logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger\n\nTRACE = DEBUG - 5\nlogging.addLevelName(TRACE, 'TRACE')\nDEPRECATION = WARN + 5\nlogging.addLevelName(DEPRECATION, 'DEPRECATION')\nLEVELS = {\n 'TRACE': TRACE,\n 'DEBUG': DEBUG,\n 'INFO': INFO,\n 'WARN': WARN,\n 'DEPRECATION': DEPRECATION,\n 'ERROR': ERROR,\n 'FATAL': FATAL,\n}\n\nFORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'\n_env_level = os.environ.get(\"PYI_LOG_LEVEL\", \"INFO\")\ntry:\n level = LEVELS[_env_level.upper()]\nexcept KeyError:\n raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. 
Should be one of {list(LEVELS)}.\")\nlogging.basicConfig(format=FORMAT, level=level)\nlogger = getLogger('PyInstaller')\n\n\ndef __add_options(parser):\n parser.add_argument(\n '--log-level',\n choices=LEVELS,\n metavar=\"LEVEL\",\n dest='loglevel',\n help='Amount of detail in build-time console messages. LEVEL may be one of %s (default: INFO). '\n 'Also settable via and overrides the PYI_LOG_LEVEL environment variable.' % ', '.join(LEVELS),\n )\n\n\ndef __process_options(parser, opts):\n if opts.loglevel:\n try:\n level = opts.loglevel.upper()\n _level = LEVELS[level]\n except KeyError:\n parser.error('Unknown log level `%s`' % opts.loglevel)\n logger.setLevel(_level)\n os.environ[\"PYI_LOG_LEVEL\"] = level\n", "path": "PyInstaller/log.py"}]} | 1,072 | 438 |
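A short illustrative sketch, not drawn from the record above: the root cause in this record is that `logging.addLevelName` only registers the name/number mapping inside the `logging` module's lookup tables; it never creates a `logging.DEPRECATION` attribute, so the old `getattr(logging, level)` lookup could not succeed. Standard-library behaviour only, shown as a sketch:

```python
import logging

DEPRECATION = logging.WARN + 5          # 35
logging.addLevelName(DEPRECATION, 'DEPRECATION')

# The name is registered for formatting and reverse lookup...
print(logging.getLevelName(DEPRECATION))      # DEPRECATION
print(logging.getLevelName('DEPRECATION'))    # 35
# ...but no module attribute is created, which the old code relied on.
print(hasattr(logging, 'DEPRECATION'))        # False
```

Hence the patch replaces the `getattr(logging, ...)` lookup with an explicit `LEVELS` dict.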
gh_patches_debug_9449 | rasdani/github-patches | git_diff | mirumee__ariadne-523 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove superfluous schema validation
It turns out that the `validate_schema` call in https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L26 is not needed here. 
On the other hand, `assert_valid_schema` is called here: https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L40, which is sufficient. 
Fixes #523
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/executable_schema.py`
Content:
```
1 from typing import Dict, List, Type, Union
2
3 from graphql import (
4 GraphQLSchema,
5 assert_valid_schema,
6 build_ast_schema,
7 parse,
8 validate_schema,
9 )
10
11 from .enums import set_default_enum_values_on_schema
12 from .schema_visitor import SchemaDirectiveVisitor
13 from .types import SchemaBindable
14
15
16 def make_executable_schema(
17 type_defs: Union[str, List[str]],
18 *bindables: Union[SchemaBindable, List[SchemaBindable]],
19 directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,
20 ) -> GraphQLSchema:
21 if isinstance(type_defs, list):
22 type_defs = join_type_defs(type_defs)
23
24 ast_document = parse(type_defs)
25 schema = build_ast_schema(ast_document)
26 validate_schema(schema)
27
28 for bindable in bindables:
29 if isinstance(bindable, list):
30 for obj in bindable:
31 obj.bind_to_schema(schema)
32 else:
33 bindable.bind_to_schema(schema)
34
35 set_default_enum_values_on_schema(schema)
36
37 if directives:
38 SchemaDirectiveVisitor.visit_schema_directives(schema, directives)
39
40 assert_valid_schema(schema)
41
42 return schema
43
44
45 def join_type_defs(type_defs: List[str]) -> str:
46 return "\n\n".join(t.strip() for t in type_defs)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py
--- a/ariadne/executable_schema.py
+++ b/ariadne/executable_schema.py
@@ -5,7 +5,6 @@
assert_valid_schema,
build_ast_schema,
parse,
- validate_schema,
)
from .enums import set_default_enum_values_on_schema
@@ -23,7 +22,6 @@
ast_document = parse(type_defs)
schema = build_ast_schema(ast_document)
- validate_schema(schema)
for bindable in bindables:
if isinstance(bindable, list):
| {"golden_diff": "diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py\n--- a/ariadne/executable_schema.py\n+++ b/ariadne/executable_schema.py\n@@ -5,7 +5,6 @@\n assert_valid_schema,\n build_ast_schema,\n parse,\n- validate_schema,\n )\n \n from .enums import set_default_enum_values_on_schema\n@@ -23,7 +22,6 @@\n \n ast_document = parse(type_defs)\n schema = build_ast_schema(ast_document)\n- validate_schema(schema)\n \n for bindable in bindables:\n if isinstance(bindable, list):\n", "issue": "Remove superfluous schema validation\nIt turns out that `validate_schema` called in https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L26 is not needed here. \r\nIn the other hand, `assert_validate_schema` is called here: https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L40 which is sufficient. \r\n\r\nFixes #523 \n", "before_files": [{"content": "from typing import Dict, List, Type, Union\n\nfrom graphql import (\n GraphQLSchema,\n assert_valid_schema,\n build_ast_schema,\n parse,\n validate_schema,\n)\n\nfrom .enums import set_default_enum_values_on_schema\nfrom .schema_visitor import SchemaDirectiveVisitor\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_ast_schema(ast_document)\n validate_schema(schema)\n\n for bindable in bindables:\n if isinstance(bindable, list):\n for obj in bindable:\n obj.bind_to_schema(schema)\n else:\n bindable.bind_to_schema(schema)\n\n set_default_enum_values_on_schema(schema)\n\n if directives:\n SchemaDirectiveVisitor.visit_schema_directives(schema, directives)\n\n assert_valid_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n", "path": "ariadne/executable_schema.py"}], "after_files": [{"content": "from typing import Dict, List, Type, Union\n\nfrom graphql import (\n GraphQLSchema,\n assert_valid_schema,\n build_ast_schema,\n parse,\n)\n\nfrom .enums import set_default_enum_values_on_schema\nfrom .schema_visitor import SchemaDirectiveVisitor\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_ast_schema(ast_document)\n\n for bindable in bindables:\n if isinstance(bindable, list):\n for obj in bindable:\n obj.bind_to_schema(schema)\n else:\n bindable.bind_to_schema(schema)\n\n set_default_enum_values_on_schema(schema)\n\n if directives:\n SchemaDirectiveVisitor.visit_schema_directives(schema, directives)\n\n assert_valid_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n", "path": "ariadne/executable_schema.py"}]} | 720 | 138 |
gh_patches_debug_51325 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6307 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Undefined names in Python code found with flake8
## Description
## Way to reproduce
[flake8](http://flake8.pycqa.org) testing of https://github.com/scikit-image/scikit-image on Python 3.7.1
$ __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__
```
./skimage/measure/mc_meta/createluts.py:139:18: F821 undefined name 'luts'
for a in dir(luts):
^
./doc/ext/notebook_doc.py:1:1: F822 undefined name 'python_to_notebook' in __all__
__all__ = ['python_to_notebook', 'Notebook']
^
1 F821 undefined name 'luts'
1 F822 undefined name 'python_to_notebook' in __all__
2
```
__E901,E999,F821,F822,F823__ are the "_showstopper_" [flake8](http://flake8.pycqa.org) issues that can halt the runtime with a SyntaxError, NameError, etc. These 5 are different from most other flake8 issues, which are merely "style violations" -- useful for readability but they do not affect runtime safety.
* F821: undefined name `name`
* F822: undefined name `name` in `__all__`
* F823: local variable name referenced before assignment
* E901: SyntaxError or IndentationError
* E999: SyntaxError -- failed to compile a file into an Abstract Syntax Tree
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/ext/notebook_doc.py`
Content:
```
1 __all__ = ['python_to_notebook', 'Notebook']
2
3 import json
4 import copy
5 import warnings
6
7
8 # Skeleton notebook in JSON format
9 skeleton_nb = """{
10 "metadata": {
11 "name":""
12 },
13 "nbformat": 3,
14 "nbformat_minor": 0,
15 "worksheets": [
16 {
17 "cells": [
18 {
19 "cell_type": "code",
20 "collapsed": false,
21 "input": [
22 "%matplotlib inline"
23 ],
24 "language": "python",
25 "metadata": {},
26 "outputs": []
27 }
28 ],
29 "metadata": {}
30 }
31 ]
32 }"""
33
34
35 class Notebook(object):
36 """
37 Notebook object for building an IPython notebook cell-by-cell.
38 """
39
40 def __init__(self):
41 # cell type code
42 self.cell_code = {
43 'cell_type': 'code',
44 'collapsed': False,
45 'input': [
46 '# Code Goes Here'
47 ],
48 'language': 'python',
49 'metadata': {},
50 'outputs': []
51 }
52
53 # cell type markdown
54 self.cell_md = {
55 'cell_type': 'markdown',
56 'metadata': {},
57 'source': [
58 'Markdown Goes Here'
59 ]
60 }
61
62 self.template = json.loads(skeleton_nb)
63 self.cell_type = {'input': self.cell_code, 'source': self.cell_md}
64 self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}
65
66 def add_cell(self, value, cell_type='code'):
67 """Add a notebook cell.
68
69 Parameters
70 ----------
71 value : str
72 Cell content.
73 cell_type : {'code', 'markdown'}
74 Type of content (default is 'code').
75
76 """
77 if cell_type in ['markdown', 'code']:
78 key = self.valuetype_to_celltype[cell_type]
79 cells = self.template['worksheets'][0]['cells']
80 cells.append(copy.deepcopy(self.cell_type[key]))
81 # assign value to the last cell
82 cells[-1][key] = value
83 else:
84 warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)
85
86 def json(self):
87 """Return a JSON representation of the notebook.
88
89 Returns
90 -------
91 str
92 JSON notebook.
93
94 """
95 return json.dumps(self.template, indent=2)
96
97
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/ext/notebook_doc.py b/doc/ext/notebook_doc.py
--- a/doc/ext/notebook_doc.py
+++ b/doc/ext/notebook_doc.py
@@ -1,4 +1,4 @@
-__all__ = ['python_to_notebook', 'Notebook']
+__all__ = ['Notebook']
import json
import copy
| {"golden_diff": "diff --git a/doc/ext/notebook_doc.py b/doc/ext/notebook_doc.py\n--- a/doc/ext/notebook_doc.py\n+++ b/doc/ext/notebook_doc.py\n@@ -1,4 +1,4 @@\n-__all__ = ['python_to_notebook', 'Notebook']\n+__all__ = ['Notebook']\n \n import json\n import copy\n", "issue": "Undefined names in Python code found with flake8\n## Description\r\n\r\n\r\n## Way to reproduce\r\n[flake8](http://flake8.pycqa.org) testing of https://github.com/scikit-image/scikit-image on Python 3.7.1\r\n\r\n$ __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__\r\n```\r\n./skimage/measure/mc_meta/createluts.py:139:18: F821 undefined name 'luts'\r\n for a in dir(luts):\r\n ^\r\n./doc/ext/notebook_doc.py:1:1: F822 undefined name 'python_to_notebook' in __all__\r\n__all__ = ['python_to_notebook', 'Notebook']\r\n^\r\n1 F821 undefined name 'luts'\r\n1 F822 undefined name 'python_to_notebook' in __all__\r\n2\r\n```\r\n__E901,E999,F821,F822,F823__ are the \"_showstopper_\" [flake8](http://flake8.pycqa.org) issues that can halt the runtime with a SyntaxError, NameError, etc. These 5 are different from most other flake8 issues which are merely \"style violations\" -- useful for readability but they do not effect runtime safety.\r\n* F821: undefined name `name`\r\n* F822: undefined name `name` in `__all__`\r\n* F823: local variable name referenced before assignment\r\n* E901: SyntaxError or IndentationError\r\n* E999: SyntaxError -- failed to compile a file into an Abstract Syntax Tree\r\n\n", "before_files": [{"content": "__all__ = ['python_to_notebook', 'Notebook']\n\nimport json\nimport copy\nimport warnings\n\n\n# Skeleton notebook in JSON format\nskeleton_nb = \"\"\"{\n \"metadata\": {\n \"name\":\"\"\n },\n \"nbformat\": 3,\n \"nbformat_minor\": 0,\n \"worksheets\": [\n {\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [\n \"%matplotlib inline\"\n ],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n }\n ],\n \"metadata\": {}\n }\n ]\n}\"\"\"\n\n\nclass Notebook(object):\n \"\"\"\n Notebook object for building an IPython notebook cell-by-cell.\n \"\"\"\n\n def __init__(self):\n # cell type code\n self.cell_code = {\n 'cell_type': 'code',\n 'collapsed': False,\n 'input': [\n '# Code Goes Here'\n ],\n 'language': 'python',\n 'metadata': {},\n 'outputs': []\n }\n\n # cell type markdown\n self.cell_md = {\n 'cell_type': 'markdown',\n 'metadata': {},\n 'source': [\n 'Markdown Goes Here'\n ]\n }\n\n self.template = json.loads(skeleton_nb)\n self.cell_type = {'input': self.cell_code, 'source': self.cell_md}\n self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}\n\n def add_cell(self, value, cell_type='code'):\n \"\"\"Add a notebook cell.\n\n Parameters\n ----------\n value : str\n Cell content.\n cell_type : {'code', 'markdown'}\n Type of content (default is 'code').\n\n \"\"\"\n if cell_type in ['markdown', 'code']:\n key = self.valuetype_to_celltype[cell_type]\n cells = self.template['worksheets'][0]['cells']\n cells.append(copy.deepcopy(self.cell_type[key]))\n # assign value to the last cell\n cells[-1][key] = value\n else:\n warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)\n\n def json(self):\n \"\"\"Return a JSON representation of the notebook.\n\n Returns\n -------\n str\n JSON notebook.\n\n \"\"\"\n return json.dumps(self.template, indent=2)\n\n\n", "path": "doc/ext/notebook_doc.py"}], "after_files": [{"content": "__all__ = ['Notebook']\n\nimport json\nimport copy\nimport warnings\n\n\n# Skeleton notebook in JSON 
format\nskeleton_nb = \"\"\"{\n \"metadata\": {\n \"name\":\"\"\n },\n \"nbformat\": 3,\n \"nbformat_minor\": 0,\n \"worksheets\": [\n {\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [\n \"%matplotlib inline\"\n ],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n }\n ],\n \"metadata\": {}\n }\n ]\n}\"\"\"\n\n\nclass Notebook(object):\n \"\"\"\n Notebook object for building an IPython notebook cell-by-cell.\n \"\"\"\n\n def __init__(self):\n # cell type code\n self.cell_code = {\n 'cell_type': 'code',\n 'collapsed': False,\n 'input': [\n '# Code Goes Here'\n ],\n 'language': 'python',\n 'metadata': {},\n 'outputs': []\n }\n\n # cell type markdown\n self.cell_md = {\n 'cell_type': 'markdown',\n 'metadata': {},\n 'source': [\n 'Markdown Goes Here'\n ]\n }\n\n self.template = json.loads(skeleton_nb)\n self.cell_type = {'input': self.cell_code, 'source': self.cell_md}\n self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}\n\n def add_cell(self, value, cell_type='code'):\n \"\"\"Add a notebook cell.\n\n Parameters\n ----------\n value : str\n Cell content.\n cell_type : {'code', 'markdown'}\n Type of content (default is 'code').\n\n \"\"\"\n if cell_type in ['markdown', 'code']:\n key = self.valuetype_to_celltype[cell_type]\n cells = self.template['worksheets'][0]['cells']\n cells.append(copy.deepcopy(self.cell_type[key]))\n # assign value to the last cell\n cells[-1][key] = value\n else:\n warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)\n\n def json(self):\n \"\"\"Return a JSON representation of the notebook.\n\n Returns\n -------\n str\n JSON notebook.\n\n \"\"\"\n return json.dumps(self.template, indent=2)\n\n\n", "path": "doc/ext/notebook_doc.py"}]} | 1,321 | 81 |
gh_patches_debug_22583 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-583 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Avoid quadratic scaling of template integration tests
#### Issue description
Currently, in ``test_templates.py``, a compatibility test is performed by having every template in the library (of the same device) applied before every other one - a quadratic growth of test cases in the number of templates. This becomes prohibitive, and we should find another solution that tests templates' compatibility.
#### Additional information
The issue could be easily fixed by defining small dummy templates that are called before and after the tested template. We could also try to make the tested templates pass parameters to each other.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pennylane/templates/embeddings/basis.py`
Content:
```
1 # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 Contains the ``BasisEmbedding`` template.
16 """
17 # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
18 import numpy as np
19
20 from pennylane.templates.decorator import template
21 from pennylane.ops import BasisState
22 from pennylane.templates.utils import check_shape, check_wires, get_shape
23
24
25 @template
26 def BasisEmbedding(features, wires):
27 r"""Encodes :math:`n` binary features into a basis state of :math:`n` qubits.
28
29 For example, for ``features=np.array([0, 1, 0])``, the quantum system will be
30 prepared in state :math:`|010 \rangle`.
31
32 .. warning::
33
34 ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.
35 The ``features`` argument is therefore not differentiable when using the template, and
36 gradients with respect to the argument cannot be computed by PennyLane.
37
38 Args:
39 features (array): binary input array of shape ``(n, )``
40 wires (Sequence[int] or int): qubit indices that the template acts on
41
42 Raises:
43 ValueError: if inputs do not have the correct format
44 """
45
46 #############
47 # Input checks
48
49 wires = check_wires(wires)
50
51 expected_shape = (len(wires),)
52 check_shape(
53 features,
54 expected_shape,
55 msg="'features' must be of shape {}; got {}" "".format(expected_shape, get_shape(features)),
56 )
57
58 if any([b not in [0, 1] for b in features]):
59 raise ValueError("'basis_state' must only consist of 0s and 1s; got {}".format(features))
60
61 ###############
62
63 features = np.array(features)
64 BasisState(features, wires=wires)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pennylane/templates/embeddings/basis.py b/pennylane/templates/embeddings/basis.py
--- a/pennylane/templates/embeddings/basis.py
+++ b/pennylane/templates/embeddings/basis.py
@@ -15,11 +15,11 @@
Contains the ``BasisEmbedding`` template.
"""
# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
-import numpy as np
+from collections import Iterable
from pennylane.templates.decorator import template
-from pennylane.ops import BasisState
-from pennylane.templates.utils import check_shape, check_wires, get_shape
+from pennylane.templates.utils import check_shape, check_wires, get_shape, check_type
+import pennylane as qml
@template
@@ -48,6 +48,10 @@
wires = check_wires(wires)
+ check_type(
+ features, [Iterable], msg="'features' must be iterable; got type {}".format(type(features))
+ )
+
expected_shape = (len(wires),)
check_shape(
features,
@@ -60,5 +64,6 @@
###############
- features = np.array(features)
- BasisState(features, wires=wires)
+ for wire, bit in zip(wires, features):
+ if bit == 1:
+ qml.PauliX(wire)
| {"golden_diff": "diff --git a/pennylane/templates/embeddings/basis.py b/pennylane/templates/embeddings/basis.py\n--- a/pennylane/templates/embeddings/basis.py\n+++ b/pennylane/templates/embeddings/basis.py\n@@ -15,11 +15,11 @@\n Contains the ``BasisEmbedding`` template.\n \"\"\"\n # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\n-import numpy as np\n+from collections import Iterable\n \n from pennylane.templates.decorator import template\n-from pennylane.ops import BasisState\n-from pennylane.templates.utils import check_shape, check_wires, get_shape\n+from pennylane.templates.utils import check_shape, check_wires, get_shape, check_type\n+import pennylane as qml\n \n \n @template\n@@ -48,6 +48,10 @@\n \n wires = check_wires(wires)\n \n+ check_type(\n+ features, [Iterable], msg=\"'features' must be iterable; got type {}\".format(type(features))\n+ )\n+\n expected_shape = (len(wires),)\n check_shape(\n features,\n@@ -60,5 +64,6 @@\n \n ###############\n \n- features = np.array(features)\n- BasisState(features, wires=wires)\n+ for wire, bit in zip(wires, features):\n+ if bit == 1:\n+ qml.PauliX(wire)\n", "issue": "Avoid quadratic scaling of template integration tests\n#### Issue description\r\n\r\nCurrently, in ``test_templates.py`` a compatibility test is performed by having every template in the library (of the same device) applied before every other one - a quadratic growth of test cases in the number of templates. This becomes prohibitive, and we should find another solution that tests templates' compatibility.\r\n\r\n#### Additional information\r\n\r\nThe issue could be easily fixed by defining small dummy templates that are called before and after the tested template. We could also try to make the tested templates pass parameters to each other.\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nContains the ``BasisEmbedding`` template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport numpy as np\n\nfrom pennylane.templates.decorator import template\nfrom pennylane.ops import BasisState\nfrom pennylane.templates.utils import check_shape, check_wires, get_shape\n\n\n@template\ndef BasisEmbedding(features, wires):\n r\"\"\"Encodes :math:`n` binary features into a basis state of :math:`n` qubits.\n\n For example, for ``features=np.array([0, 1, 0])``, the quantum system will be\n prepared in state :math:`|010 \\rangle`.\n\n .. 
warning::\n\n ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.\n The ``features`` argument is therefore not differentiable when using the template, and\n gradients with respect to the argument cannot be computed by PennyLane.\n\n Args:\n features (array): binary input array of shape ``(n, )``\n wires (Sequence[int] or int): qubit indices that the template acts on\n\n Raises:\n ValueError: if inputs do not have the correct format\n \"\"\"\n\n #############\n # Input checks\n\n wires = check_wires(wires)\n\n expected_shape = (len(wires),)\n check_shape(\n features,\n expected_shape,\n msg=\"'features' must be of shape {}; got {}\" \"\".format(expected_shape, get_shape(features)),\n )\n\n if any([b not in [0, 1] for b in features]):\n raise ValueError(\"'basis_state' must only consist of 0s and 1s; got {}\".format(features))\n\n ###############\n\n features = np.array(features)\n BasisState(features, wires=wires)\n", "path": "pennylane/templates/embeddings/basis.py"}], "after_files": [{"content": "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nContains the ``BasisEmbedding`` template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nfrom collections import Iterable\n\nfrom pennylane.templates.decorator import template\nfrom pennylane.templates.utils import check_shape, check_wires, get_shape, check_type\nimport pennylane as qml\n\n\n@template\ndef BasisEmbedding(features, wires):\n r\"\"\"Encodes :math:`n` binary features into a basis state of :math:`n` qubits.\n\n For example, for ``features=np.array([0, 1, 0])``, the quantum system will be\n prepared in state :math:`|010 \\rangle`.\n\n .. warning::\n\n ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.\n The ``features`` argument is therefore not differentiable when using the template, and\n gradients with respect to the argument cannot be computed by PennyLane.\n\n Args:\n features (array): binary input array of shape ``(n, )``\n wires (Sequence[int] or int): qubit indices that the template acts on\n\n Raises:\n ValueError: if inputs do not have the correct format\n \"\"\"\n\n #############\n # Input checks\n\n wires = check_wires(wires)\n\n check_type(\n features, [Iterable], msg=\"'features' must be iterable; got type {}\".format(type(features))\n )\n\n expected_shape = (len(wires),)\n check_shape(\n features,\n expected_shape,\n msg=\"'features' must be of shape {}; got {}\" \"\".format(expected_shape, get_shape(features)),\n )\n\n if any([b not in [0, 1] for b in features]):\n raise ValueError(\"'basis_state' must only consist of 0s and 1s; got {}\".format(features))\n\n ###############\n\n for wire, bit in zip(wires, features):\n if bit == 1:\n qml.PauliX(wire)\n", "path": "pennylane/templates/embeddings/basis.py"}]} | 1,032 | 316 |
gh_patches_debug_12916 | rasdani/github-patches | git_diff | kivy__python-for-android-2469 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ctypes.util.find_library 64-bit error
I was trying to use the `zeroconf` package for my application. It worked perfectly on `armeabi-v7a`; however, the program crashed on launch on `arm64-v8a` (both tested on a Huawei P30).
I have investigated the issues and discovered that the problem is with `ctypes.util.find_library` or, more precisely with the p4a module `andoroid._ctypes_library_finder` in the function `find_library`.
The actual problem is that this function finds 32bit libraries regardless of the actual architecture. For example
```python
ctypes.util.find_library('c')
```
returns `/system/lib/libc.so` for both 32- and 64-bit architectures. The correct behavior is to return this path if Python is compiled for 32-bit, and `/system/lib64/libc.so` for a 64-bit build.
Below is the code of a simple Kivy app that shows the issue:
```python
# main.py
import sys
import ctypes
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.label import Label
root = Builder.load_string("""\
#:import sys sys
BoxLayout:
orientation: 'vertical'
Label:
id: arch
size_hint_y: 1
text_size: self.size
halign: 'center'
valign: 'middle'
text: '64-bit' if sys.maxsize > 2**32 else '32-bit'
Label:
id: lib
size_hint_y: 1
text_size: self.size
halign: 'center'
valign: 'middle'
Label:
id: err
size_hint_y: 4
text_size: self.size
halign: 'left'
valign: 'middle'
""")
class TestCtypesApp(App):
def build(self):
lib = ctypes.util.find_library('c')
root.ids.lib.text = str(lib)
try:
cdll = ctypes.CDLL(lib)
except Exception as err:
root.ids.err.text = "{}: {}".format(type(err).__name__, err)
else:
root.ids.err.text = 'CORRECT'
root.ids.err.halign = 'center'
return root
if __name__ == '__main__':
TestCtypesApp().run()
```
```ini
# buildozer.spec
[app]
title = Test CTypes
package.name = testctypes
package.domain = org.test
source.dir = .
source.include_exts = py
version = 0.1
requirements = python3,kivy
orientation = portrait
osx.python_version = 3
osx.kivy_version = 1.9.1
fullscreen = 0
android.api = 30
android.arch = armeabi-v7a
ios.kivy_ios_url = https://github.com/kivy/kivy-ios
ios.kivy_ios_branch = master
ios.ios_deploy_url = https://github.com/phonegap/ios-deploy
ios.ios_deploy_branch = 1.7.0
[buildozer]
log_level = 2
warn_on_root = 1
[app@arm64]
android.arch = arm64-v8a
```
When compiled for `armeabi-v7a` it shows:
```
32-bit
/system/lib/libc.so
CORRECT
```
while on `arm64-v8a`:
```
64-bit
/system/lib/libc.so
OSError: dlopen failed: library "/system/lib/libc.so" needed or dlopened by
"/data/data/org.test.testctypes/files/app/_python_bundle/modules/_ctypes.cpython-38.so"
is not accessible for this namespace "classloader-namespace"
```
The expected output is:
```
64-bit
/system/lib64/libc.so
CORRECT
```
The source of this problem is line 47 of the file [pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py](../blob/develop/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py#L47). For 64-bit Python (the build target arch matters, not the system architecture), the libraries to search should be `["/system/lib64/libc.so", "/system/lib/libc.so"]`.
I am also submitting a pull request resolving this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py`
Content:
```
1
2 import os
3
4
5 def get_activity_lib_dir(activity_name):
6 from jnius import autoclass
7
8 # Get the actual activity instance:
9 activity_class = autoclass(activity_name)
10 if activity_class is None:
11 return None
12 activity = None
13 if hasattr(activity_class, "mActivity") and \
14 activity_class.mActivity is not None:
15 activity = activity_class.mActivity
16 elif hasattr(activity_class, "mService") and \
17 activity_class.mService is not None:
18 activity = activity_class.mService
19 if activity is None:
20 return None
21
22 # Extract the native lib dir from the activity instance:
23 package_name = activity.getApplicationContext().getPackageName()
24 manager = activity.getApplicationContext().getPackageManager()
25 manager_class = autoclass("android.content.pm.PackageManager")
26 native_lib_dir = manager.getApplicationInfo(
27 package_name, manager_class.GET_SHARED_LIBRARY_FILES
28 ).nativeLibraryDir
29 return native_lib_dir
30
31
32 def does_libname_match_filename(search_name, file_path):
33 # Filter file names so given search_name="mymodule" we match one of:
34 # mymodule.so (direct name + .so)
35 # libmymodule.so (added lib prefix)
36 # mymodule.arm64.so (added dot-separated middle parts)
37 # mymodule.so.1.3.4 (added dot-separated version tail)
38 # and all above (all possible combinations)
39 import re
40 file_name = os.path.basename(file_path)
41 return (re.match(r"^(lib)?" + re.escape(search_name) +
42 r"\.(.*\.)?so(\.[0-9]+)*$", file_name) is not None)
43
44
45 def find_library(name):
46 # Obtain all places for native libraries:
47 lib_search_dirs = ["/system/lib"]
48 lib_dir_1 = get_activity_lib_dir("org.kivy.android.PythonActivity")
49 if lib_dir_1 is not None:
50 lib_search_dirs.insert(0, lib_dir_1)
51 lib_dir_2 = get_activity_lib_dir("org.kivy.android.PythonService")
52 if lib_dir_2 is not None and lib_dir_2 not in lib_search_dirs:
53 lib_search_dirs.insert(0, lib_dir_2)
54
55 # Now scan the lib dirs:
56 for lib_dir in [ldir for ldir in lib_search_dirs if os.path.exists(ldir)]:
57 filelist = [
58 f for f in os.listdir(lib_dir)
59 if does_libname_match_filename(name, f)
60 ]
61 if len(filelist) > 0:
62 return os.path.join(lib_dir, filelist[0])
63 return None
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py b/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py
--- a/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py
+++ b/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py
@@ -1,4 +1,5 @@
+import sys
import os
@@ -44,7 +45,10 @@
def find_library(name):
# Obtain all places for native libraries:
- lib_search_dirs = ["/system/lib"]
+ if sys.maxsize > 2**32: # 64bit-build
+ lib_search_dirs = ["/system/lib64", "/system/lib"]
+ else:
+ lib_search_dirs = ["/system/lib"]
lib_dir_1 = get_activity_lib_dir("org.kivy.android.PythonActivity")
if lib_dir_1 is not None:
lib_search_dirs.insert(0, lib_dir_1)
| {"golden_diff": "diff --git a/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py b/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py\n--- a/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py\n+++ b/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py\n@@ -1,4 +1,5 @@\n \n+import sys\n import os\n \n \n@@ -44,7 +45,10 @@\n \n def find_library(name):\n # Obtain all places for native libraries:\n- lib_search_dirs = [\"/system/lib\"]\n+ if sys.maxsize > 2**32: # 64bit-build\n+ lib_search_dirs = [\"/system/lib64\", \"/system/lib\"]\n+ else:\n+ lib_search_dirs = [\"/system/lib\"]\n lib_dir_1 = get_activity_lib_dir(\"org.kivy.android.PythonActivity\")\n if lib_dir_1 is not None:\n lib_search_dirs.insert(0, lib_dir_1)\n", "issue": "ctypes.util.find_library 64-bit error\nI was trying to use `zeroconf` package for my application. It worked perfectly on `armeabi-v7a`, however the program crashed on launch on `arm64-v8a` (both tested on Huawei P30).\r\n\r\nI have investigated the issues and discovered that the problem is with `ctypes.util.find_library` or, more precisely with the p4a module `andoroid._ctypes_library_finder` in the function `find_library`.\r\n\r\nThe actual problem is that this function finds 32bit libraries regardless of the actual architecture. For example\r\n\r\n```python\r\nctypes.util.find_library('c')\r\n```\r\n\r\nreturns `/system/lib/libc.so` both for 32- and 64-bit architecture. The correct behavior is to return this if Python is compiled for 32-bit and `/system/lib64/libc.so` for 64-bit one.\r\n\r\nBelow is the code of a simple Kivy app that shows the issue:\r\n\r\n```python\r\n# main.py\r\nimport sys\r\nimport ctypes\r\n\r\n\r\nfrom kivy.app import App\r\nfrom kivy.lang import Builder\r\nfrom kivy.uix.label import Label\r\n\r\n\r\nroot = Builder.load_string(\"\"\"\\\r\n#:import sys sys\r\n\r\nBoxLayout:\r\n orientation: 'vertical'\r\n Label:\r\n id: arch\r\n size_hint_y: 1\r\n text_size: self.size\r\n halign: 'center'\r\n valign: 'middle'\r\n text: '64-bit' if sys.maxsize > 2**32 else '32-bit'\r\n Label:\r\n id: lib\r\n size_hint_y: 1\r\n text_size: self.size\r\n halign: 'center'\r\n valign: 'middle'\r\n Label:\r\n id: err\r\n size_hint_y: 4\r\n text_size: self.size\r\n halign: 'left'\r\n valign: 'middle'\r\n\"\"\")\r\n\r\n\r\nclass TestCtypesApp(App):\r\n\r\n def build(self):\r\n lib = ctypes.util.find_library('c')\r\n root.ids.lib.text = str(lib)\r\n try:\r\n cdll = ctypes.CDLL(lib)\r\n except Exception as err:\r\n root.ids.err.text = \"{}: {}\".format(type(err).__name__, err)\r\n else:\r\n root.ids.err.text = 'CORRECT'\r\n root.ids.err.halign = 'center'\r\n return root\r\n\r\n\r\nif __name__ == '__main__':\r\n TestCtypesApp().run()\r\n```\r\n\r\n```ini\r\n# buildozer.spec\r\n[app]\r\ntitle = Test CTypes\r\npackage.name = testctypes\r\npackage.domain = org.test\r\nsource.dir = .\r\nsource.include_exts = py\r\nversion = 0.1\r\nrequirements = python3,kivy\r\norientation = portrait\r\nosx.python_version = 3\r\nosx.kivy_version = 1.9.1\r\nfullscreen = 0\r\nandroid.api = 30\r\nandroid.arch = armeabi-v7a\r\nios.kivy_ios_url = https://github.com/kivy/kivy-ios\r\nios.kivy_ios_branch = master\r\nios.ios_deploy_url = https://github.com/phonegap/ios-deploy\r\nios.ios_deploy_branch = 1.7.0\r\n\r\n[buildozer]\r\nlog_level = 2\r\nwarn_on_root = 1\r\n\r\n[app@arm64]\r\nandroid.arch = arm64-v8a\r\n```\r\n\r\nWhen compiled for `armeabi-v7a` it 
shows:\r\n\r\n```\r\n32-bit\r\n\r\n/system/lib/libc.so\r\n\r\nCORRECT\r\n```\r\n\r\nwhile on `arm64-v8a`:\r\n\r\n```\r\n64-bit\r\n\r\n/system/lib/libc.so\r\n\r\nOSError: dlopen failed: library \"/system/lib/libc.so\" needed or dlopened by\r\n\"/data/data/org.test.testctypes/files/app/_python_bundle/modules/_ctypes.cpython-38.so\"\r\nis not accessible for this namespace \"classloader-namespace\"\r\n```\r\n\r\nThe expected output is:\r\n\r\n```\r\n64-bit\r\n\r\n/system/lib64/libc.so\r\n\r\nCORRECT\r\n```\r\n\r\nThe source of this problem is in the line 47 of the file [pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py](../blob/develop/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py#L47). For 64-bit Python (build target arch matters, not the system archiecture), the libraries to search should be `[\"/system/lib64/libc.so\", \"/system/lib/libc.so\"]`.\r\n\r\nI am also submitting a pull request resolving this issue.\n", "before_files": [{"content": "\nimport os\n\n\ndef get_activity_lib_dir(activity_name):\n from jnius import autoclass\n\n # Get the actual activity instance:\n activity_class = autoclass(activity_name)\n if activity_class is None:\n return None\n activity = None\n if hasattr(activity_class, \"mActivity\") and \\\n activity_class.mActivity is not None:\n activity = activity_class.mActivity\n elif hasattr(activity_class, \"mService\") and \\\n activity_class.mService is not None:\n activity = activity_class.mService\n if activity is None:\n return None\n\n # Extract the native lib dir from the activity instance:\n package_name = activity.getApplicationContext().getPackageName()\n manager = activity.getApplicationContext().getPackageManager()\n manager_class = autoclass(\"android.content.pm.PackageManager\")\n native_lib_dir = manager.getApplicationInfo(\n package_name, manager_class.GET_SHARED_LIBRARY_FILES\n ).nativeLibraryDir\n return native_lib_dir\n\n\ndef does_libname_match_filename(search_name, file_path):\n # Filter file names so given search_name=\"mymodule\" we match one of:\n # mymodule.so (direct name + .so)\n # libmymodule.so (added lib prefix)\n # mymodule.arm64.so (added dot-separated middle parts)\n # mymodule.so.1.3.4 (added dot-separated version tail)\n # and all above (all possible combinations)\n import re\n file_name = os.path.basename(file_path)\n return (re.match(r\"^(lib)?\" + re.escape(search_name) +\n r\"\\.(.*\\.)?so(\\.[0-9]+)*$\", file_name) is not None)\n\n\ndef find_library(name):\n # Obtain all places for native libraries:\n lib_search_dirs = [\"/system/lib\"]\n lib_dir_1 = get_activity_lib_dir(\"org.kivy.android.PythonActivity\")\n if lib_dir_1 is not None:\n lib_search_dirs.insert(0, lib_dir_1)\n lib_dir_2 = get_activity_lib_dir(\"org.kivy.android.PythonService\")\n if lib_dir_2 is not None and lib_dir_2 not in lib_search_dirs:\n lib_search_dirs.insert(0, lib_dir_2)\n\n # Now scan the lib dirs:\n for lib_dir in [ldir for ldir in lib_search_dirs if os.path.exists(ldir)]:\n filelist = [\n f for f in os.listdir(lib_dir)\n if does_libname_match_filename(name, f)\n ]\n if len(filelist) > 0:\n return os.path.join(lib_dir, filelist[0])\n return None\n", "path": "pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py"}], "after_files": [{"content": "\nimport sys\nimport os\n\n\ndef get_activity_lib_dir(activity_name):\n from jnius import autoclass\n\n # Get the actual activity instance:\n activity_class = autoclass(activity_name)\n if activity_class is None:\n return None\n activity = None\n if 
hasattr(activity_class, \"mActivity\") and \\\n activity_class.mActivity is not None:\n activity = activity_class.mActivity\n elif hasattr(activity_class, \"mService\") and \\\n activity_class.mService is not None:\n activity = activity_class.mService\n if activity is None:\n return None\n\n # Extract the native lib dir from the activity instance:\n package_name = activity.getApplicationContext().getPackageName()\n manager = activity.getApplicationContext().getPackageManager()\n manager_class = autoclass(\"android.content.pm.PackageManager\")\n native_lib_dir = manager.getApplicationInfo(\n package_name, manager_class.GET_SHARED_LIBRARY_FILES\n ).nativeLibraryDir\n return native_lib_dir\n\n\ndef does_libname_match_filename(search_name, file_path):\n # Filter file names so given search_name=\"mymodule\" we match one of:\n # mymodule.so (direct name + .so)\n # libmymodule.so (added lib prefix)\n # mymodule.arm64.so (added dot-separated middle parts)\n # mymodule.so.1.3.4 (added dot-separated version tail)\n # and all above (all possible combinations)\n import re\n file_name = os.path.basename(file_path)\n return (re.match(r\"^(lib)?\" + re.escape(search_name) +\n r\"\\.(.*\\.)?so(\\.[0-9]+)*$\", file_name) is not None)\n\n\ndef find_library(name):\n # Obtain all places for native libraries:\n if sys.maxsize > 2**32: # 64bit-build\n lib_search_dirs = [\"/system/lib64\", \"/system/lib\"]\n else:\n lib_search_dirs = [\"/system/lib\"]\n lib_dir_1 = get_activity_lib_dir(\"org.kivy.android.PythonActivity\")\n if lib_dir_1 is not None:\n lib_search_dirs.insert(0, lib_dir_1)\n lib_dir_2 = get_activity_lib_dir(\"org.kivy.android.PythonService\")\n if lib_dir_2 is not None and lib_dir_2 not in lib_search_dirs:\n lib_search_dirs.insert(0, lib_dir_2)\n\n # Now scan the lib dirs:\n for lib_dir in [ldir for ldir in lib_search_dirs if os.path.exists(ldir)]:\n filelist = [\n f for f in os.listdir(lib_dir)\n if does_libname_match_filename(name, f)\n ]\n if len(filelist) > 0:\n return os.path.join(lib_dir, filelist[0])\n return None\n", "path": "pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py"}]} | 1,921 | 221 |
gh_patches_debug_4282 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3257 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove scrollbars when they are not necessary
The home page shows a scroll bar even when there is nothing to scroll


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/organisation.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.conf import settings
8 from django.db.models import Q
9 from django.utils import six
10 from rest_framework.decorators import api_view
11 from rest_framework.exceptions import ParseError
12 from rest_framework.parsers import JSONParser
13 from rest_framework.response import Response
14 from rest_framework_xml.parsers import XMLParser
15 from rest_framework_xml.compat import etree
16
17 from akvo.rest.views.utils import int_or_none, get_qs_elements_for_page
18 from akvo.rsr.filters import location_choices, get_m49_filter
19 from akvo.rsr.models import Project, Organisation, Country
20 from akvo.rsr.views.utils import apply_keywords, org_projects
21 from ..serializers import OrganisationSerializer, OrganisationDirectorySerializer
22 from ..viewsets import BaseRSRViewSet
23
24
25 class AkvoOrganisationParser(XMLParser):
26 def parse(self, stream, media_type=None, parser_context=None):
27 assert etree, 'XMLParser requires defusedxml to be installed'
28
29 parser_context = parser_context or {}
30 encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
31 parser = etree.DefusedXMLParser(encoding=encoding)
32 try:
33 tree = etree.parse(stream, parser=parser, forbid_dtd=True)
34 except (etree.ParseError, ValueError) as exc:
35 raise ParseError('XML parse error - %s' % six.text_type(exc))
36 return self.organisation_data_from_etree(tree.getroot())
37
38 def organisation_data_from_etree(self, tree):
39 def find_text(tree, str):
40 element = tree.find(str)
41 if element is None:
42 return ''
43 return element.text.strip() if element.text else ""
44
45 def location_data(location_tree):
46 if location_tree is None:
47 return []
48 iso_code = find_text(location_tree, 'iso_code').lower()
49 country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))
50 country = country.id
51 latitude = find_text(location_tree, 'latitude') or 0
52 longitude = find_text(location_tree, 'longitude') or 0
53 primary = True
54 return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]
55
56 long_name = find_text(tree, 'name')
57 name = long_name[:25]
58 description = find_text(tree, 'description')
59 url = find_text(tree, 'url')
60 iati_type = find_text(tree, 'iati_organisation_type')
61 new_organisation_type = int(iati_type) if iati_type else 22
62 organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)
63 locations = location_data(tree.find('location/object'))
64 return dict(
65 name=name, long_name=long_name, description=description, url=url,
66 organisation_type=organisation_type, new_organisation_type=new_organisation_type,
67 locations=locations
68 )
69
70
71 class OrganisationViewSet(BaseRSRViewSet):
72 """
73 API endpoint that allows organisations to be viewed or edited.
74 """
75 queryset = Organisation.objects.all()
76 serializer_class = OrganisationSerializer
77 parser_classes = (AkvoOrganisationParser, JSONParser,)
78
79
80 @api_view(['GET'])
81 def organisation_directory(request):
82 """REST view for the update directory."""
83
84 page = request.rsr_page
85 all_organisations = Organisation.objects.all() if not page else _page_organisations(page)
86
87 # Filter updates based on query parameters
88 filter_, text_filter = _create_filters_query(request)
89 organisations = (
90 all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations
91 )
92 organisations_text_filtered = (
93 organisations.filter(text_filter) if text_filter is not None else organisations
94 )
95 if organisations_text_filtered.exists():
96 organisations = organisations_text_filtered
97
98 # Get the relevant data for typeaheads based on filtered organisations (minus
99 # text filtering, if no organisations were found)
100 locations = [
101 {'id': choice[0], 'name': choice[1]}
102 for choice in location_choices(organisations)
103 ]
104
105 display_organisations = get_qs_elements_for_page(organisations_text_filtered, request)
106
107 # Get related objects of page at once
108 response = {
109 'project_count': all_organisations.count(),
110 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,
111 'location': locations,
112 }
113 return Response(response)
114
115
116 def _public_projects():
117 """Return all public projects."""
118 return Project.objects.public().published().select_related('partners')
119
120
121 def _page_organisations(page):
122 """Dig out the list or organisations to use."""
123 projects = org_projects(page.organisation) if page.partner_projects else _public_projects()
124 keyword_projects = apply_keywords(page, projects)
125 return keyword_projects.all_partners()
126
127
128 def _create_filters_query(request):
129 """Returns a Q object expression based on query parameters."""
130 location_param = int_or_none(request.GET.get('location'))
131 title_or_subtitle_param = request.GET.get('title_or_subtitle')
132
133 location_filter = (
134 get_m49_filter(location_param, use_recipient_country=False) if location_param else None
135 )
136 title_filter = (
137 Q(name__icontains=title_or_subtitle_param) |
138 Q(long_name__icontains=title_or_subtitle_param)
139 ) if title_or_subtitle_param else None
140 all_filters = [
141 location_filter,
142 ]
143 filters = filter(None, all_filters)
144 return reduce(lambda x, y: x & y, filters) if filters else None, title_filter
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py
--- a/akvo/rest/views/organisation.py
+++ b/akvo/rest/views/organisation.py
@@ -106,7 +106,7 @@
# Get related objects of page at once
response = {
- 'project_count': all_organisations.count(),
+ 'project_count': organisations_text_filtered.count(),
'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,
'location': locations,
}
| {"golden_diff": "diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py\n--- a/akvo/rest/views/organisation.py\n+++ b/akvo/rest/views/organisation.py\n@@ -106,7 +106,7 @@\n \n # Get related objects of page at once\n response = {\n- 'project_count': all_organisations.count(),\n+ 'project_count': organisations_text_filtered.count(),\n 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,\n 'location': locations,\n }\n", "issue": "Remove scrollbars when they are not necessary\nThe home page shows a scroll bar even when there is nothing to scroll\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import six\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nfrom rest_framework_xml.parsers import XMLParser\nfrom rest_framework_xml.compat import etree\n\nfrom akvo.rest.views.utils import int_or_none, get_qs_elements_for_page\nfrom akvo.rsr.filters import location_choices, get_m49_filter\nfrom akvo.rsr.models import Project, Organisation, Country\nfrom akvo.rsr.views.utils import apply_keywords, org_projects\nfrom ..serializers import OrganisationSerializer, OrganisationDirectorySerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n 
)\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n\n\n@api_view(['GET'])\ndef organisation_directory(request):\n \"\"\"REST view for the update directory.\"\"\"\n\n page = request.rsr_page\n all_organisations = Organisation.objects.all() if not page else _page_organisations(page)\n\n # Filter updates based on query parameters\n filter_, text_filter = _create_filters_query(request)\n organisations = (\n all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations\n )\n organisations_text_filtered = (\n organisations.filter(text_filter) if text_filter is not None else organisations\n )\n if organisations_text_filtered.exists():\n organisations = organisations_text_filtered\n\n # Get the relevant data for typeaheads based on filtered organisations (minus\n # text filtering, if no organisations were found)\n locations = [\n {'id': choice[0], 'name': choice[1]}\n for choice in location_choices(organisations)\n ]\n\n display_organisations = get_qs_elements_for_page(organisations_text_filtered, request)\n\n # Get related objects of page at once\n response = {\n 'project_count': all_organisations.count(),\n 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,\n 'location': locations,\n }\n return Response(response)\n\n\ndef _public_projects():\n \"\"\"Return all public projects.\"\"\"\n return Project.objects.public().published().select_related('partners')\n\n\ndef _page_organisations(page):\n \"\"\"Dig out the list or organisations to use.\"\"\"\n projects = org_projects(page.organisation) if page.partner_projects else _public_projects()\n keyword_projects = apply_keywords(page, projects)\n return keyword_projects.all_partners()\n\n\ndef _create_filters_query(request):\n \"\"\"Returns a Q object expression based on query parameters.\"\"\"\n location_param = int_or_none(request.GET.get('location'))\n title_or_subtitle_param = request.GET.get('title_or_subtitle')\n\n location_filter = (\n get_m49_filter(location_param, use_recipient_country=False) if location_param else None\n )\n title_filter = (\n Q(name__icontains=title_or_subtitle_param) |\n Q(long_name__icontains=title_or_subtitle_param)\n ) if title_or_subtitle_param else None\n all_filters = [\n location_filter,\n ]\n filters = filter(None, all_filters)\n return reduce(lambda x, y: x & y, filters) if filters else None, title_filter\n", "path": "akvo/rest/views/organisation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import six\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nfrom rest_framework_xml.parsers import XMLParser\nfrom rest_framework_xml.compat import etree\n\nfrom akvo.rest.views.utils import int_or_none, get_qs_elements_for_page\nfrom akvo.rsr.filters import location_choices, get_m49_filter\nfrom akvo.rsr.models import Project, Organisation, 
Country\nfrom akvo.rsr.views.utils import apply_keywords, org_projects\nfrom ..serializers import OrganisationSerializer, OrganisationDirectorySerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n\n\n@api_view(['GET'])\ndef organisation_directory(request):\n \"\"\"REST view for the update directory.\"\"\"\n\n page = request.rsr_page\n all_organisations = Organisation.objects.all() if not page else _page_organisations(page)\n\n # Filter updates based on query parameters\n filter_, text_filter = _create_filters_query(request)\n organisations = (\n all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations\n )\n organisations_text_filtered = (\n organisations.filter(text_filter) if text_filter is not None else organisations\n )\n if organisations_text_filtered.exists():\n organisations = organisations_text_filtered\n\n # Get the relevant data for typeaheads based on filtered organisations (minus\n # text filtering, if no organisations were found)\n locations = [\n {'id': choice[0], 'name': choice[1]}\n for choice in location_choices(organisations)\n ]\n\n display_organisations = get_qs_elements_for_page(organisations_text_filtered, request)\n\n # Get related objects of page at once\n response = {\n 'project_count': organisations_text_filtered.count(),\n 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,\n 'location': locations,\n }\n return 
Response(response)\n\n\ndef _public_projects():\n \"\"\"Return all public projects.\"\"\"\n return Project.objects.public().published().select_related('partners')\n\n\ndef _page_organisations(page):\n \"\"\"Dig out the list or organisations to use.\"\"\"\n projects = org_projects(page.organisation) if page.partner_projects else _public_projects()\n keyword_projects = apply_keywords(page, projects)\n return keyword_projects.all_partners()\n\n\ndef _create_filters_query(request):\n \"\"\"Returns a Q object expression based on query parameters.\"\"\"\n location_param = int_or_none(request.GET.get('location'))\n title_or_subtitle_param = request.GET.get('title_or_subtitle')\n\n location_filter = (\n get_m49_filter(location_param, use_recipient_country=False) if location_param else None\n )\n title_filter = (\n Q(name__icontains=title_or_subtitle_param) |\n Q(long_name__icontains=title_or_subtitle_param)\n ) if title_or_subtitle_param else None\n all_filters = [\n location_filter,\n ]\n filters = filter(None, all_filters)\n return reduce(lambda x, y: x & y, filters) if filters else None, title_filter\n", "path": "akvo/rest/views/organisation.py"}]} | 1,967 | 121 |
gh_patches_debug_11080 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-6581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation on Armv6l creates a Linux-32bit-unknown bootloader.
## Description of the issue
When installing on a RPi 1b you get an incorrectly named bootloader created. The directory is called Linux-32bit-unknown and not Linux-32bit-arm.
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```4.8```
* Version of Python: <!-- e.g. 3.7 --> 3.7
* Platform: <!-- e.g GNU/Linux (distribution), Windows (language settings), OS X, FreeBSD --> Raspbian GNU/Linux 10 (buster)* How you installed Python: <!-- e.g. python.org/downloads, conda, brew, pyenv, apt, Windows store -->pip3 install pyinstaller
* Did you also try this on another platform? Does it work there? yes.
* [x] start with clean installation
* [x] use the latest development version
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/_shared_with_waf.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2021, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11 """
12 Code to be shared by PyInstaller and the bootloader/wscript file.
13
14 This code must not assume that either PyInstaller or any of its dependencies installed. I.e., the only imports allowed
15 in here are standard library ones. Within reason, it is preferable that this file should still run under Python 2.7 as
16 many compiler docker images still have only Python 2 installed.
17 """
18
19 import platform
20 import re
21
22
23 def _pyi_machine(machine, system):
24 # type: (str, str) -> str
25 """
26 Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name.
27
28 Args:
29 machine:
30 The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a
31 C compiler.
32 system:
33 The output of ``platform.system()`` on the target machine.
34 Returns:
35 Either a string tag or, on platforms that don't need an architecture tag, ``None``.
36
37 Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost
38 impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based
39 only on the machine name alias or shorthand reported by the C compiler at the build time. Rather, use a loose
40 differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing.
41 """
42 # See the corresponding tests in tests/unit/test_compat.py for examples.
43
44 if platform.machine() == "sw_64" or platform.machine() == "loongarch64":
45 # This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine.
46 return platform.machine()
47
48 if system != "Linux":
49 # No architecture specifier for anything par Linux.
50 # - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless
51 # and painful to give Windows an architecture specifier.
52 # - macOS is on two 64 bit architectures, but they are merged into one "universal2" bootloader.
53 # - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our
54 # BSD users are on x86_64. This may change in the distant future.
55 return
56
57 if machine.startswith(("arm", "aarch")):
58 # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.
59 return "arm"
60 if machine in ("x86_64", "x64", "x86"):
61 return "intel"
62 if re.fullmatch("i[1-6]86", machine):
63 return "intel"
64 if machine.startswith(("ppc", "powerpc")):
65 # PowerPC comes in 64 vs 32 bit and little vs big endian variants.
66 return "ppc"
67 if machine in ("mips64", "mips"):
68 return "mips"
69 # Machines with no known aliases :)
70 if machine in ("s390x",):
71 return machine
72
73 # Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to
74 # have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently
75 # unlikely to ever happen.
76 return "unknown"
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/_shared_with_waf.py b/PyInstaller/_shared_with_waf.py
--- a/PyInstaller/_shared_with_waf.py
+++ b/PyInstaller/_shared_with_waf.py
@@ -57,6 +57,10 @@
if machine.startswith(("arm", "aarch")):
# ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.
return "arm"
+ if machine in ("thumb"):
+ # Reported by waf/gcc when Thumb instruction set is enabled on 32-bit ARM. The platform.machine() returns "arm"
+ # regardless of the instruction set.
+ return "arm"
if machine in ("x86_64", "x64", "x86"):
return "intel"
if re.fullmatch("i[1-6]86", machine):
| {"golden_diff": "diff --git a/PyInstaller/_shared_with_waf.py b/PyInstaller/_shared_with_waf.py\n--- a/PyInstaller/_shared_with_waf.py\n+++ b/PyInstaller/_shared_with_waf.py\n@@ -57,6 +57,10 @@\n if machine.startswith((\"arm\", \"aarch\")):\n # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.\n return \"arm\"\n+ if machine in (\"thumb\"):\n+ # Reported by waf/gcc when Thumb instruction set is enabled on 32-bit ARM. The platform.machine() returns \"arm\"\n+ # regardless of the instruction set.\n+ return \"arm\"\n if machine in (\"x86_64\", \"x64\", \"x86\"):\n return \"intel\"\n if re.fullmatch(\"i[1-6]86\", machine):\n", "issue": "Installation on Armv6l creates a Linux-32bit-unknown bootloader.\n## Description of the issue\r\nWhen installing on a RPi 1b you get an incorrectly named bootloader created. The directory is called Linux-32bit-unknown and not Linux-32bit-arm.\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.8```\r\n* Version of Python: <!-- e.g. 3.7 --> 3.7\r\n* Platform: <!-- e.g GNU/Linux (distribution), Windows (language settings), OS X, FreeBSD --> Raspbian GNU/Linux 10 (buster)* How you installed Python: <!-- e.g. python.org/downloads, conda, brew, pyenv, apt, Windows store -->pip3 install pyinstaller\r\n* Did you also try this on another platform? Does it work there? yes.\r\n\r\n\r\n\r\n * [x] start with clean installation\r\n * [x] use the latest development version\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\nCode to be shared by PyInstaller and the bootloader/wscript file.\n\nThis code must not assume that either PyInstaller or any of its dependencies installed. I.e., the only imports allowed\nin here are standard library ones. Within reason, it is preferable that this file should still run under Python 2.7 as\nmany compiler docker images still have only Python 2 installed.\n\"\"\"\n\nimport platform\nimport re\n\n\ndef _pyi_machine(machine, system):\n # type: (str, str) -> str\n \"\"\"\n Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name.\n\n Args:\n machine:\n The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a\n C compiler.\n system:\n The output of ``platform.system()`` on the target machine.\n Returns:\n Either a string tag or, on platforms that don't need an architecture tag, ``None``.\n\n Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost\n impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based\n only on the machine name alias or shorthand reported by the C compiler at the build time. 
Rather, use a loose\n differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing.\n \"\"\"\n # See the corresponding tests in tests/unit/test_compat.py for examples.\n\n if platform.machine() == \"sw_64\" or platform.machine() == \"loongarch64\":\n # This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine.\n return platform.machine()\n\n if system != \"Linux\":\n # No architecture specifier for anything par Linux.\n # - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless\n # and painful to give Windows an architecture specifier.\n # - macOS is on two 64 bit architectures, but they are merged into one \"universal2\" bootloader.\n # - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our\n # BSD users are on x86_64. This may change in the distant future.\n return\n\n if machine.startswith((\"arm\", \"aarch\")):\n # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.\n return \"arm\"\n if machine in (\"x86_64\", \"x64\", \"x86\"):\n return \"intel\"\n if re.fullmatch(\"i[1-6]86\", machine):\n return \"intel\"\n if machine.startswith((\"ppc\", \"powerpc\")):\n # PowerPC comes in 64 vs 32 bit and little vs big endian variants.\n return \"ppc\"\n if machine in (\"mips64\", \"mips\"):\n return \"mips\"\n # Machines with no known aliases :)\n if machine in (\"s390x\",):\n return machine\n\n # Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to\n # have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently\n # unlikely to ever happen.\n return \"unknown\"\n", "path": "PyInstaller/_shared_with_waf.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\nCode to be shared by PyInstaller and the bootloader/wscript file.\n\nThis code must not assume that either PyInstaller or any of its dependencies installed. I.e., the only imports allowed\nin here are standard library ones. 
Within reason, it is preferable that this file should still run under Python 2.7 as\nmany compiler docker images still have only Python 2 installed.\n\"\"\"\n\nimport platform\nimport re\n\n\ndef _pyi_machine(machine, system):\n # type: (str, str) -> str\n \"\"\"\n Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name.\n\n Args:\n machine:\n The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a\n C compiler.\n system:\n The output of ``platform.system()`` on the target machine.\n Returns:\n Either a string tag or, on platforms that don't need an architecture tag, ``None``.\n\n Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost\n impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based\n only on the machine name alias or shorthand reported by the C compiler at the build time. Rather, use a loose\n differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing.\n \"\"\"\n # See the corresponding tests in tests/unit/test_compat.py for examples.\n\n if platform.machine() == \"sw_64\" or platform.machine() == \"loongarch64\":\n # This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine.\n return platform.machine()\n\n if system != \"Linux\":\n # No architecture specifier for anything par Linux.\n # - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless\n # and painful to give Windows an architecture specifier.\n # - macOS is on two 64 bit architectures, but they are merged into one \"universal2\" bootloader.\n # - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our\n # BSD users are on x86_64. This may change in the distant future.\n return\n\n if machine.startswith((\"arm\", \"aarch\")):\n # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.\n return \"arm\"\n if machine in (\"thumb\"):\n # Reported by waf/gcc when Thumb instruction set is enabled on 32-bit ARM. The platform.machine() returns \"arm\"\n # regardless of the instruction set.\n return \"arm\"\n if machine in (\"x86_64\", \"x64\", \"x86\"):\n return \"intel\"\n if re.fullmatch(\"i[1-6]86\", machine):\n return \"intel\"\n if machine.startswith((\"ppc\", \"powerpc\")):\n # PowerPC comes in 64 vs 32 bit and little vs big endian variants.\n return \"ppc\"\n if machine in (\"mips64\", \"mips\"):\n return \"mips\"\n # Machines with no known aliases :)\n if machine in (\"s390x\",):\n return machine\n\n # Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to\n # have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently\n # unlikely to ever happen.\n return \"unknown\"\n", "path": "PyInstaller/_shared_with_waf.py"}]} | 1,458 | 210 |
gh_patches_debug_11874 | rasdani/github-patches | git_diff | kubeflow__pipelines-2213 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Component] GCP dataproc create_cluster component cannot correctly specify image_version.
Issue:
When specifying not-null image version, create_cluster component raises:
`<HttpError 400 when requesting
https://dataproc.googleapis.com/v1/projects/ml-pipeline-dogfood/regions/us-central1/clusters?alt=json&requestId=7c933fdacb068cd6811fb40b8334a3d4
returned "Invalid JSON payload received. Unknown name "softwareConfig" at 'cluster': Cannot find field.">`
Initial investigation shows that in [here](https://github.com/kubeflow/pipelines/blob/7dab30085e2edda6fb4ecb61a61c9f37664009a1/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py#L70), `softwareConfig` was specified as a top-layer member of cluster payload, but actually it should be a member of cluster['config']. See [this reference](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster).
Will work out a fix shortly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import json
15
16 from fire import decorators
17 from ._client import DataprocClient
18 from kfp_component.core import KfpExecutionContext, display
19 from .. import common as gcp_common
20
21 @decorators.SetParseFns(image_version=str)
22 def create_cluster(project_id, region, name=None, name_prefix=None,
23 initialization_actions=None, config_bucket=None, image_version=None,
24 cluster=None, wait_interval=30):
25 """Creates a DataProc cluster under a project.
26
27 Args:
28 project_id (str): Required. The ID of the Google Cloud Platform project
29 that the cluster belongs to.
30 region (str): Required. The Cloud Dataproc region in which to handle the
31 request.
32 name (str): Optional. The cluster name. Cluster names within a project
33 must be unique. Names of deleted clusters can be reused.
34 name_prefix (str): Optional. The prefix of the cluster name.
35 initialization_actions (list): Optional. List of GCS URIs of executables
36 to execute on each node after config is completed. By default,
37 executables are run on master and all worker nodes.
38 config_bucket (str): Optional. A Google Cloud Storage bucket used to
39 stage job dependencies, config files, and job driver console output.
40 image_version (str): Optional. The version of software inside the cluster.
41 cluster (dict): Optional. The full cluster config. See [full details](
42 https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster)
43 wait_interval (int): The wait seconds between polling the operation.
44 Defaults to 30s.
45
46 Returns:
47 The created cluster object.
48
49 Output Files:
50 $KFP_OUTPUT_PATH/dataproc/cluster_name.txt: The cluster name of the
51 created cluster.
52 """
53 if not cluster:
54 cluster = {}
55 cluster['projectId'] = project_id
56 if 'config' not in cluster:
57 cluster['config'] = {}
58 if name:
59 cluster['clusterName'] = name
60 if initialization_actions:
61 cluster['config']['initializationActions'] = list(
62 map(lambda file: {
63 'executableFile': file
64 }, initialization_actions)
65 )
66 if config_bucket:
67 cluster['config']['configBucket'] = config_bucket
68 if image_version:
69 if 'softwareConfig' not in cluster:
70 cluster['softwareConfig'] = {}
71 cluster['softwareConfig']['imageVersion'] = image_version
72
73 return _create_cluster_internal(project_id, region, cluster, name_prefix,
74 wait_interval)
75
76 def _create_cluster_internal(project_id, region, cluster, name_prefix,
77 wait_interval):
78 client = DataprocClient()
79 operation_name = None
80 with KfpExecutionContext(
81 on_cancel=lambda: client.cancel_operation(operation_name)) as ctx:
82 _set_cluster_name(cluster, ctx.context_id(), name_prefix)
83 _dump_metadata(cluster, region)
84 operation = client.create_cluster(project_id, region, cluster,
85 request_id=ctx.context_id())
86 operation_name = operation.get('name')
87 operation = client.wait_for_operation_done(operation_name,
88 wait_interval)
89 return _dump_cluster(operation.get('response'))
90
91 def _set_cluster_name(cluster, context_id, name_prefix):
92 if 'clusterName' in cluster:
93 return
94 if not name_prefix:
95 name_prefix = 'cluster'
96 cluster['clusterName'] = name_prefix + '-' + context_id
97
98 def _dump_metadata(cluster, region):
99 display.display(display.Link(
100 'https://console.cloud.google.com/dataproc/clusters/{}?project={}®ion={}'.format(
101 cluster.get('clusterName'), cluster.get('projectId'), region),
102 'Cluster Details'
103 ))
104
105 def _dump_cluster(cluster):
106 gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster.json',
107 json.dumps(cluster))
108 gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster_name.txt',
109 cluster.get('clusterName'))
110 return cluster
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py
--- a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py
+++ b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py
@@ -66,9 +66,9 @@
if config_bucket:
cluster['config']['configBucket'] = config_bucket
if image_version:
- if 'softwareConfig' not in cluster:
- cluster['softwareConfig'] = {}
- cluster['softwareConfig']['imageVersion'] = image_version
+ if 'softwareConfig' not in cluster['config']:
+ cluster['config']['softwareConfig'] = {}
+ cluster['config']['softwareConfig']['imageVersion'] = image_version
return _create_cluster_internal(project_id, region, cluster, name_prefix,
wait_interval)
| {"golden_diff": "diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n--- a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n+++ b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n@@ -66,9 +66,9 @@\n if config_bucket:\n cluster['config']['configBucket'] = config_bucket\n if image_version:\n- if 'softwareConfig' not in cluster:\n- cluster['softwareConfig'] = {}\n- cluster['softwareConfig']['imageVersion'] = image_version\n+ if 'softwareConfig' not in cluster['config']:\n+ cluster['config']['softwareConfig'] = {}\n+ cluster['config']['softwareConfig']['imageVersion'] = image_version\n \n return _create_cluster_internal(project_id, region, cluster, name_prefix,\n wait_interval)\n", "issue": "[Component] GCP dataproc create_cluster component cannot correctly specify image_version.\nIssue:\r\nWhen specifying not-null image version, create_cluster component raises:\r\n`<HttpError 400 when requesting \r\nhttps://dataproc.googleapis.com/v1/projects/ml-pipeline-dogfood/regions/us-central1/clusters?alt=json&requestId=7c933fdacb068cd6811fb40b8334a3d4\r\n returned \"Invalid JSON payload received. Unknown name \"softwareConfig\" at 'cluster': Cannot find field.\">`\r\n\r\nInitial investigation shows that in [here](https://github.com/kubeflow/pipelines/blob/7dab30085e2edda6fb4ecb61a61c9f37664009a1/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py#L70), `softwareConfig` was specified as a top-layer member of cluster payload, but actually it should be a member of cluster['config']. See [this reference](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster).\r\n\r\nWill work out a fix shortly.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\n\nfrom fire import decorators\nfrom ._client import DataprocClient\nfrom kfp_component.core import KfpExecutionContext, display\nfrom .. import common as gcp_common\n\[email protected](image_version=str)\ndef create_cluster(project_id, region, name=None, name_prefix=None,\n initialization_actions=None, config_bucket=None, image_version=None,\n cluster=None, wait_interval=30):\n \"\"\"Creates a DataProc cluster under a project.\n\n Args:\n project_id (str): Required. The ID of the Google Cloud Platform project \n that the cluster belongs to.\n region (str): Required. The Cloud Dataproc region in which to handle the \n request.\n name (str): Optional. The cluster name. Cluster names within a project\n must be unique. Names of deleted clusters can be reused.\n name_prefix (str): Optional. The prefix of the cluster name.\n initialization_actions (list): Optional. List of GCS URIs of executables \n to execute on each node after config is completed. 
By default,\n executables are run on master and all worker nodes. \n config_bucket (str): Optional. A Google Cloud Storage bucket used to \n stage job dependencies, config files, and job driver console output.\n image_version (str): Optional. The version of software inside the cluster.\n cluster (dict): Optional. The full cluster config. See [full details](\n https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster)\n wait_interval (int): The wait seconds between polling the operation. \n Defaults to 30s.\n\n Returns:\n The created cluster object.\n\n Output Files:\n $KFP_OUTPUT_PATH/dataproc/cluster_name.txt: The cluster name of the \n created cluster.\n \"\"\"\n if not cluster:\n cluster = {}\n cluster['projectId'] = project_id\n if 'config' not in cluster:\n cluster['config'] = {}\n if name:\n cluster['clusterName'] = name\n if initialization_actions:\n cluster['config']['initializationActions'] = list(\n map(lambda file: {\n 'executableFile': file\n }, initialization_actions)\n )\n if config_bucket:\n cluster['config']['configBucket'] = config_bucket\n if image_version:\n if 'softwareConfig' not in cluster:\n cluster['softwareConfig'] = {}\n cluster['softwareConfig']['imageVersion'] = image_version\n\n return _create_cluster_internal(project_id, region, cluster, name_prefix,\n wait_interval)\n\ndef _create_cluster_internal(project_id, region, cluster, name_prefix, \n wait_interval):\n client = DataprocClient()\n operation_name = None\n with KfpExecutionContext(\n on_cancel=lambda: client.cancel_operation(operation_name)) as ctx:\n _set_cluster_name(cluster, ctx.context_id(), name_prefix)\n _dump_metadata(cluster, region)\n operation = client.create_cluster(project_id, region, cluster, \n request_id=ctx.context_id())\n operation_name = operation.get('name')\n operation = client.wait_for_operation_done(operation_name, \n wait_interval)\n return _dump_cluster(operation.get('response'))\n\ndef _set_cluster_name(cluster, context_id, name_prefix):\n if 'clusterName' in cluster:\n return\n if not name_prefix:\n name_prefix = 'cluster'\n cluster['clusterName'] = name_prefix + '-' + context_id\n\ndef _dump_metadata(cluster, region):\n display.display(display.Link(\n 'https://console.cloud.google.com/dataproc/clusters/{}?project={}®ion={}'.format(\n cluster.get('clusterName'), cluster.get('projectId'), region),\n 'Cluster Details'\n ))\n\ndef _dump_cluster(cluster):\n gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster.json', \n json.dumps(cluster))\n gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster_name.txt',\n cluster.get('clusterName'))\n return cluster\n", "path": "components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\n\nfrom fire import decorators\nfrom ._client import DataprocClient\nfrom kfp_component.core import KfpExecutionContext, display\nfrom .. 
import common as gcp_common\n\[email protected](image_version=str)\ndef create_cluster(project_id, region, name=None, name_prefix=None,\n initialization_actions=None, config_bucket=None, image_version=None,\n cluster=None, wait_interval=30):\n \"\"\"Creates a DataProc cluster under a project.\n\n Args:\n project_id (str): Required. The ID of the Google Cloud Platform project \n that the cluster belongs to.\n region (str): Required. The Cloud Dataproc region in which to handle the \n request.\n name (str): Optional. The cluster name. Cluster names within a project\n must be unique. Names of deleted clusters can be reused.\n name_prefix (str): Optional. The prefix of the cluster name.\n initialization_actions (list): Optional. List of GCS URIs of executables \n to execute on each node after config is completed. By default,\n executables are run on master and all worker nodes. \n config_bucket (str): Optional. A Google Cloud Storage bucket used to \n stage job dependencies, config files, and job driver console output.\n image_version (str): Optional. The version of software inside the cluster.\n cluster (dict): Optional. The full cluster config. See [full details](\n https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster)\n wait_interval (int): The wait seconds between polling the operation. \n Defaults to 30s.\n\n Returns:\n The created cluster object.\n\n Output Files:\n $KFP_OUTPUT_PATH/dataproc/cluster_name.txt: The cluster name of the \n created cluster.\n \"\"\"\n if not cluster:\n cluster = {}\n cluster['projectId'] = project_id\n if 'config' not in cluster:\n cluster['config'] = {}\n if name:\n cluster['clusterName'] = name\n if initialization_actions:\n cluster['config']['initializationActions'] = list(\n map(lambda file: {\n 'executableFile': file\n }, initialization_actions)\n )\n if config_bucket:\n cluster['config']['configBucket'] = config_bucket\n if image_version:\n if 'softwareConfig' not in cluster['config']:\n cluster['config']['softwareConfig'] = {}\n cluster['config']['softwareConfig']['imageVersion'] = image_version\n\n return _create_cluster_internal(project_id, region, cluster, name_prefix,\n wait_interval)\n\ndef _create_cluster_internal(project_id, region, cluster, name_prefix, \n wait_interval):\n client = DataprocClient()\n operation_name = None\n with KfpExecutionContext(\n on_cancel=lambda: client.cancel_operation(operation_name)) as ctx:\n _set_cluster_name(cluster, ctx.context_id(), name_prefix)\n _dump_metadata(cluster, region)\n operation = client.create_cluster(project_id, region, cluster, \n request_id=ctx.context_id())\n operation_name = operation.get('name')\n operation = client.wait_for_operation_done(operation_name, \n wait_interval)\n return _dump_cluster(operation.get('response'))\n\ndef _set_cluster_name(cluster, context_id, name_prefix):\n if 'clusterName' in cluster:\n return\n if not name_prefix:\n name_prefix = 'cluster'\n cluster['clusterName'] = name_prefix + '-' + context_id\n\ndef _dump_metadata(cluster, region):\n display.display(display.Link(\n 'https://console.cloud.google.com/dataproc/clusters/{}?project={}®ion={}'.format(\n cluster.get('clusterName'), cluster.get('projectId'), region),\n 'Cluster Details'\n ))\n\ndef _dump_cluster(cluster):\n gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster.json', \n json.dumps(cluster))\n gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster_name.txt',\n cluster.get('clusterName'))\n return cluster\n", "path": 
"components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py"}]} | 1,743 | 212 |
gh_patches_debug_3201 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-337 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC] Remove example.py in examples directory
The code in the example.py file currently reads some data frame from a file called 'dirty_data.xls'.
We can change this to include a concrete example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/example.py`
Content:
```
1 import pandas as pd
2
3 import janitor as jn
4
5 df = (
6 pd.read_excel("dirty_data.xlsx")
7 .clean_names()
8 .remove_empty()
9 .rename_column("%_allocated", "percent_allocated")
10 .rename_column("full_time_", "full_time")
11 .coalesce(["certification", "certification_1"], "certification")
12 .encode_categorical(["subject", "employee_status", "full_time"])
13 .convert_excel_date("hire_date")
14 )
15
16 print(df)
17 print(df.original_names)
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/example.py b/examples/example.py
deleted file mode 100644
--- a/examples/example.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import pandas as pd
-
-import janitor as jn
-
-df = (
- pd.read_excel("dirty_data.xlsx")
- .clean_names()
- .remove_empty()
- .rename_column("%_allocated", "percent_allocated")
- .rename_column("full_time_", "full_time")
- .coalesce(["certification", "certification_1"], "certification")
- .encode_categorical(["subject", "employee_status", "full_time"])
- .convert_excel_date("hire_date")
-)
-
-print(df)
-print(df.original_names)
| {"golden_diff": "diff --git a/examples/example.py b/examples/example.py\ndeleted file mode 100644\n--- a/examples/example.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-import pandas as pd\n-\n-import janitor as jn\n-\n-df = (\n- pd.read_excel(\"dirty_data.xlsx\")\n- .clean_names()\n- .remove_empty()\n- .rename_column(\"%_allocated\", \"percent_allocated\")\n- .rename_column(\"full_time_\", \"full_time\")\n- .coalesce([\"certification\", \"certification_1\"], \"certification\")\n- .encode_categorical([\"subject\", \"employee_status\", \"full_time\"])\n- .convert_excel_date(\"hire_date\")\n-)\n-\n-print(df)\n-print(df.original_names)\n", "issue": "[DOC] Remove example.py in examples directory\nThe code in the example.py file currently reads some data frame from a file called 'dirty_data.xls'.\r\nWe can change this to include a concrete example.\n", "before_files": [{"content": "import pandas as pd\n\nimport janitor as jn\n\ndf = (\n pd.read_excel(\"dirty_data.xlsx\")\n .clean_names()\n .remove_empty()\n .rename_column(\"%_allocated\", \"percent_allocated\")\n .rename_column(\"full_time_\", \"full_time\")\n .coalesce([\"certification\", \"certification_1\"], \"certification\")\n .encode_categorical([\"subject\", \"employee_status\", \"full_time\"])\n .convert_excel_date(\"hire_date\")\n)\n\nprint(df)\nprint(df.original_names)\n", "path": "examples/example.py"}], "after_files": [{"content": null, "path": "examples/example.py"}]} | 438 | 167 |
gh_patches_debug_24396 | rasdani/github-patches | git_diff | graspologic-org__graspologic-438 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove ari return value from AutoGMM.fit_predict
Doesn't match with API well, should just get rid of this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspy/cluster/base.py`
Content:
```
1 # Copyright 2019 NeuroData (http://neurodata.io)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from abc import ABC, abstractmethod
16
17 from sklearn.base import BaseEstimator, ClusterMixin
18 from sklearn.metrics import adjusted_rand_score
19 from sklearn.utils.validation import check_is_fitted
20
21
22 class BaseCluster(ABC, BaseEstimator, ClusterMixin):
23 """
24 Base clustering class.
25 """
26
27 @abstractmethod
28 def fit(self, X, y=None):
29 """
30 Compute clusters based on given method.
31
32 Parameters
33 ----------
34 X : array-like, shape (n_samples, n_features)
35 List of n_features-dimensional data points. Each row
36 corresponds to a single data point.
37
38 y : array-like, shape (n_samples,), optional (default=None)
39 List of labels for X if available. Used to compute
40 ARI scores.
41
42 Returns
43 -------
44 self
45 """
46
47 def predict(self, X, y=None): # pragma: no cover
48 """
49 Predict clusters based on best model.
50
51 Parameters
52 ----------
53 X : array-like, shape (n_samples, n_features)
54 List of n_features-dimensional data points. Each row
55 corresponds to a single data point.
56 y : array-like, shape (n_samples, ), optional (default=None)
57 List of labels for X if available. Used to compute
58 ARI scores.
59
60 Returns
61 -------
62 labels : array, shape (n_samples,)
63 Component labels.
64
65 ari : float
66 Adjusted Rand index. Only returned if y is given.
67 """
68 # Check if fit is already called
69 check_is_fitted(self, ["model_"], all_or_any=all)
70 labels = self.model_.predict(X)
71
72 if y is None:
73 return labels
74 else:
75 ari = adjusted_rand_score(y, labels)
76 return labels, ari
77
78 def fit_predict(self, X, y=None): # pragma: no cover
79 """
80 Fit the models and predict clusters based on best model.
81
82 Parameters
83 ----------
84 X : array-like, shape (n_samples, n_features)
85 List of n_features-dimensional data points. Each row
86 corresponds to a single data point.
87
88 y : array-like, shape (n_samples,), optional (default=None)
89 List of labels for X if available. Used to compute
90 ARI scores.
91
92 Returns
93 -------
94 labels : array, shape (n_samples,)
95 Component labels.
96
97 ari : float
98 Adjusted Rand index. Only returned if y is given.
99 """
100 self.fit(X, y)
101
102 if y is None:
103 labels = self.predict(X, y)
104 return labels
105 else:
106 labels, ari = self.predict(X, y)
107 return labels, ari
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/graspy/cluster/base.py b/graspy/cluster/base.py
--- a/graspy/cluster/base.py
+++ b/graspy/cluster/base.py
@@ -61,19 +61,12 @@
-------
labels : array, shape (n_samples,)
Component labels.
-
- ari : float
- Adjusted Rand index. Only returned if y is given.
"""
# Check if fit is already called
check_is_fitted(self, ["model_"], all_or_any=all)
labels = self.model_.predict(X)
- if y is None:
- return labels
- else:
- ari = adjusted_rand_score(y, labels)
- return labels, ari
+ return labels
def fit_predict(self, X, y=None): # pragma: no cover
"""
@@ -93,15 +86,8 @@
-------
labels : array, shape (n_samples,)
Component labels.
-
- ari : float
- Adjusted Rand index. Only returned if y is given.
"""
self.fit(X, y)
- if y is None:
- labels = self.predict(X, y)
- return labels
- else:
- labels, ari = self.predict(X, y)
- return labels, ari
+ labels = self.predict(X, y)
+ return labels
| {"golden_diff": "diff --git a/graspy/cluster/base.py b/graspy/cluster/base.py\n--- a/graspy/cluster/base.py\n+++ b/graspy/cluster/base.py\n@@ -61,19 +61,12 @@\n -------\n labels : array, shape (n_samples,)\n Component labels.\n-\n- ari : float\n- Adjusted Rand index. Only returned if y is given.\n \"\"\"\n # Check if fit is already called\n check_is_fitted(self, [\"model_\"], all_or_any=all)\n labels = self.model_.predict(X)\n \n- if y is None:\n- return labels\n- else:\n- ari = adjusted_rand_score(y, labels)\n- return labels, ari\n+ return labels\n \n def fit_predict(self, X, y=None): # pragma: no cover\n \"\"\"\n@@ -93,15 +86,8 @@\n -------\n labels : array, shape (n_samples,)\n Component labels.\n-\n- ari : float\n- Adjusted Rand index. Only returned if y is given.\n \"\"\"\n self.fit(X, y)\n \n- if y is None:\n- labels = self.predict(X, y)\n- return labels\n- else:\n- labels, ari = self.predict(X, y)\n- return labels, ari\n+ labels = self.predict(X, y)\n+ return labels\n", "issue": "remove ari return value from AutoGMM.fit_predict\nDoesn't match with API well, should just get rid of this\n", "before_files": [{"content": "# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\n\nfrom sklearn.base import BaseEstimator, ClusterMixin\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.utils.validation import check_is_fitted\n\n\nclass BaseCluster(ABC, BaseEstimator, ClusterMixin):\n \"\"\"\n Base clustering class.\n \"\"\"\n\n @abstractmethod\n def fit(self, X, y=None):\n \"\"\"\n Compute clusters based on given method.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n\n def predict(self, X, y=None): # pragma: no cover\n \"\"\"\n Predict clusters based on best model.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n y : array-like, shape (n_samples, ), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n labels : array, shape (n_samples,)\n Component labels.\n\n ari : float\n Adjusted Rand index. Only returned if y is given.\n \"\"\"\n # Check if fit is already called\n check_is_fitted(self, [\"model_\"], all_or_any=all)\n labels = self.model_.predict(X)\n\n if y is None:\n return labels\n else:\n ari = adjusted_rand_score(y, labels)\n return labels, ari\n\n def fit_predict(self, X, y=None): # pragma: no cover\n \"\"\"\n Fit the models and predict clusters based on best model.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. 
Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n labels : array, shape (n_samples,)\n Component labels.\n\n ari : float\n Adjusted Rand index. Only returned if y is given.\n \"\"\"\n self.fit(X, y)\n\n if y is None:\n labels = self.predict(X, y)\n return labels\n else:\n labels, ari = self.predict(X, y)\n return labels, ari\n", "path": "graspy/cluster/base.py"}], "after_files": [{"content": "# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\n\nfrom sklearn.base import BaseEstimator, ClusterMixin\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.utils.validation import check_is_fitted\n\n\nclass BaseCluster(ABC, BaseEstimator, ClusterMixin):\n \"\"\"\n Base clustering class.\n \"\"\"\n\n @abstractmethod\n def fit(self, X, y=None):\n \"\"\"\n Compute clusters based on given method.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n\n def predict(self, X, y=None): # pragma: no cover\n \"\"\"\n Predict clusters based on best model.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n y : array-like, shape (n_samples, ), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n labels : array, shape (n_samples,)\n Component labels.\n \"\"\"\n # Check if fit is already called\n check_is_fitted(self, [\"model_\"], all_or_any=all)\n labels = self.model_.predict(X)\n\n return labels\n\n def fit_predict(self, X, y=None): # pragma: no cover\n \"\"\"\n Fit the models and predict clusters based on best model.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n labels : array, shape (n_samples,)\n Component labels.\n \"\"\"\n self.fit(X, y)\n\n labels = self.predict(X, y)\n return labels\n", "path": "graspy/cluster/base.py"}]} | 1,225 | 313 |
gh_patches_debug_18712 | rasdani/github-patches | git_diff | TileDB-Inc__TileDB-Py-1936 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show enumerated value-types in enum-printer
Repro:
```
>>> A=tiledb.open('/var/p/obs')
>>> A.enum('louvain')
Enumeration(name='louvain', cell_val_num=4294967295, ordered=False, values=['CD4 T cells', 'CD14+ Monocytes', 'B cells', 'CD8 T cells', 'NK cells', 'FCGR3A+ Monocytes', 'Dendritic cells', 'Megakaryocytes'])
>>> A.enum('louvain').dtype
dtype('<U1')
>>> A.enum('louvain').dtype.name
'str32'
```
Request: `A.enum('louvain')` should reveal the value dtype.
---
sc-43628
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tiledb/enumeration.py`
Content:
```
1 from __future__ import annotations
2
3 import io
4 from typing import Any, Optional, Sequence
5
6 import numpy as np
7 from numpy.typing import NDArray
8
9 import tiledb.cc as lt
10
11 from .ctx import Ctx, CtxMixin
12 from .datatypes import DataType
13
14
15 class Enumeration(CtxMixin, lt.Enumeration):
16 """
17 Represents a TileDB Enumeration.
18 """
19
20 def __init__(
21 self,
22 name: str,
23 ordered: bool,
24 values: Optional[Sequence[Any]] = None,
25 dtype: Optional[np.dtype] = None,
26 ctx: Optional[Ctx] = None,
27 ):
28 """Class representing the TileDB Enumeration.
29
30 :param name: The name of the to-be created Enumeration
31 :type name: str
32 :param ordered: Whether or not to consider this enumeration ordered
33 :type ordered: bool
34 :param values: A Numpy array of values for this enumeration
35 :type values: np.array
36 :param ctx: A TileDB context
37 :type ctx: tiledb.Ctx
38 """
39 if values is None or len(values) == 0:
40 if dtype is None:
41 raise ValueError("dtype must be provied for empty enumeration")
42 super().__init__(ctx, name, np.dtype(dtype), ordered)
43
44 values = np.array(values)
45 if np.dtype(values.dtype).kind in "US":
46 dtype = (
47 lt.DataType.STRING_UTF8
48 if values.dtype.kind == "U"
49 else lt.DataType.STRING_ASCII
50 )
51 super().__init__(ctx, name, values, ordered, dtype)
52 else:
53 super().__init__(ctx, name, ordered, values, np.array([]))
54
55 @property
56 def name(self) -> str:
57 """The enumeration label string.
58
59 :rtype: str
60 """
61 return super().name
62
63 @property
64 def dtype(self) -> np.dtype:
65 """Numpy dtype representation of the enumeration type.
66
67 :rtype: numpy.dtype
68 """
69 return DataType.from_tiledb(super().type).np_dtype
70
71 @property
72 def cell_val_num(self) -> int:
73 """The enumeration's cell value number.
74
75 :rtype: int
76 """
77 return super().cell_val_num
78
79 @property
80 def ordered(self) -> bool:
81 """True if the enumeration is ordered.
82
83 :rtype: bool
84 """
85 return super().ordered
86
87 def values(self) -> NDArray:
88 """The values of the enumeration.
89
90 :rtype: NDArray
91 """
92 if self.dtype.kind == "U":
93 return np.array(super().str_values(), dtype=np.str_)
94 elif self.dtype.kind == "S":
95 return np.array(super().str_values(), dtype=np.bytes_)
96 else:
97 return np.array(super().values(), dtype=self.dtype)
98
99 def extend(self, values: Sequence[Any]) -> Enumeration:
100 """Add additional values to the enumeration.
101
102 :rtype: Enumeration
103 """
104 values = np.array(values)
105 if self.dtype.kind in "US" and values.dtype.kind not in "US":
106 raise lt.TileDBError("Passed in enumeration must be string type")
107
108 if np.issubdtype(self.dtype, np.integer) and not np.issubdtype(
109 values.dtype, np.integer
110 ):
111 raise lt.TileDBError("Passed in enumeration must be integer type")
112
113 return Enumeration.from_pybind11(self._ctx, super().extend(values))
114
115 def __eq__(self, other):
116 if not isinstance(other, Enumeration):
117 return False
118
119 return all(
120 [
121 self.name == other.name,
122 self.dtype == other.dtype,
123 self.cell_val_num == other.cell_val_num,
124 self.ordered == other.ordered,
125 np.array_equal(self.values(), other.values()),
126 ]
127 )
128
129 def __repr__(self):
130 # use safe repr if pybind11 constructor failed
131 if self._ctx is None:
132 return object.__repr__(self)
133
134 return f"Enumeration(name='{self.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})"
135
136 def _repr_html_(self):
137 output = io.StringIO()
138
139 output.write("<table>")
140 output.write("<tr>")
141 output.write("<th>Name</th>")
142 output.write("<th>Data Type</th>")
143 output.write("<th>Ordered</th>")
144 output.write("</tr>")
145 output.write(f"{self._repr_html_row_only_()}")
146 output.write("</table>")
147
148 return output.getvalue()
149
150 def _repr_html_row_only_(self):
151 output = io.StringIO()
152
153 output.write("<tr>")
154 output.write(f"<td>{self.name}</td>")
155 output.write(f"<td>{self.dtype}</td>")
156 output.write(f"<td>{self.cell_val_num}</td>")
157 output.write(f"<td>{self.ordered}</td>")
158 output.write("</tr>")
159
160 return output.getvalue()
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tiledb/enumeration.py b/tiledb/enumeration.py
--- a/tiledb/enumeration.py
+++ b/tiledb/enumeration.py
@@ -33,6 +33,8 @@
:type ordered: bool
:param values: A Numpy array of values for this enumeration
:type values: np.array
+ :param dtype: The Numpy data type for this enumeration
+ :type dtype: np.dtype
:param ctx: A TileDB context
:type ctx: tiledb.Ctx
"""
@@ -131,7 +133,7 @@
if self._ctx is None:
return object.__repr__(self)
- return f"Enumeration(name='{self.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})"
+ return f"Enumeration(name='{self.name}', dtype={self.dtype}, dtype_name='{self.dtype.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})"
def _repr_html_(self):
output = io.StringIO()
| {"golden_diff": "diff --git a/tiledb/enumeration.py b/tiledb/enumeration.py\n--- a/tiledb/enumeration.py\n+++ b/tiledb/enumeration.py\n@@ -33,6 +33,8 @@\n :type ordered: bool\n :param values: A Numpy array of values for this enumeration\n :type values: np.array\n+ :param dtype: The Numpy data type for this enumeration\n+ :type dtype: np.dtype\n :param ctx: A TileDB context\n :type ctx: tiledb.Ctx\n \"\"\"\n@@ -131,7 +133,7 @@\n if self._ctx is None:\n return object.__repr__(self)\n \n- return f\"Enumeration(name='{self.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})\"\n+ return f\"Enumeration(name='{self.name}', dtype={self.dtype}, dtype_name='{self.dtype.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})\"\n \n def _repr_html_(self):\n output = io.StringIO()\n", "issue": "Show enumerated value-types in enum-printer\nRepro:\r\n\r\n```\r\n>>> A=tiledb.open('/var/p/obs')\r\n\r\n>>> A.enum('louvain')\r\nEnumeration(name='louvain', cell_val_num=4294967295, ordered=False, values=['CD4 T cells', 'CD14+ Monocytes', 'B cells', 'CD8 T cells', 'NK cells', 'FCGR3A+ Monocytes', 'Dendritic cells', 'Megakaryocytes'])\r\n\r\n>>> A.enum('louvain').dtype\r\ndtype('<U1')\r\n\r\n>>> A.enum('louvain').dtype.name\r\n'str32'\r\n```\r\n\r\nRequest: `A.enum('louvain')` should reveal the value dtype.\r\n\r\n---\r\nsc-43628\n", "before_files": [{"content": "from __future__ import annotations\n\nimport io\nfrom typing import Any, Optional, Sequence\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nimport tiledb.cc as lt\n\nfrom .ctx import Ctx, CtxMixin\nfrom .datatypes import DataType\n\n\nclass Enumeration(CtxMixin, lt.Enumeration):\n \"\"\"\n Represents a TileDB Enumeration.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n ordered: bool,\n values: Optional[Sequence[Any]] = None,\n dtype: Optional[np.dtype] = None,\n ctx: Optional[Ctx] = None,\n ):\n \"\"\"Class representing the TileDB Enumeration.\n\n :param name: The name of the to-be created Enumeration\n :type name: str\n :param ordered: Whether or not to consider this enumeration ordered\n :type ordered: bool\n :param values: A Numpy array of values for this enumeration\n :type values: np.array\n :param ctx: A TileDB context\n :type ctx: tiledb.Ctx\n \"\"\"\n if values is None or len(values) == 0:\n if dtype is None:\n raise ValueError(\"dtype must be provied for empty enumeration\")\n super().__init__(ctx, name, np.dtype(dtype), ordered)\n\n values = np.array(values)\n if np.dtype(values.dtype).kind in \"US\":\n dtype = (\n lt.DataType.STRING_UTF8\n if values.dtype.kind == \"U\"\n else lt.DataType.STRING_ASCII\n )\n super().__init__(ctx, name, values, ordered, dtype)\n else:\n super().__init__(ctx, name, ordered, values, np.array([]))\n\n @property\n def name(self) -> str:\n \"\"\"The enumeration label string.\n\n :rtype: str\n \"\"\"\n return super().name\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"Numpy dtype representation of the enumeration type.\n\n :rtype: numpy.dtype\n \"\"\"\n return DataType.from_tiledb(super().type).np_dtype\n\n @property\n def cell_val_num(self) -> int:\n \"\"\"The enumeration's cell value number.\n\n :rtype: int\n \"\"\"\n return super().cell_val_num\n\n @property\n def ordered(self) -> bool:\n \"\"\"True if the enumeration is ordered.\n\n :rtype: bool\n \"\"\"\n return super().ordered\n\n def values(self) -> NDArray:\n \"\"\"The values of the enumeration.\n\n :rtype: NDArray\n \"\"\"\n if self.dtype.kind == \"U\":\n 
return np.array(super().str_values(), dtype=np.str_)\n elif self.dtype.kind == \"S\":\n return np.array(super().str_values(), dtype=np.bytes_)\n else:\n return np.array(super().values(), dtype=self.dtype)\n\n def extend(self, values: Sequence[Any]) -> Enumeration:\n \"\"\"Add additional values to the enumeration.\n\n :rtype: Enumeration\n \"\"\"\n values = np.array(values)\n if self.dtype.kind in \"US\" and values.dtype.kind not in \"US\":\n raise lt.TileDBError(\"Passed in enumeration must be string type\")\n\n if np.issubdtype(self.dtype, np.integer) and not np.issubdtype(\n values.dtype, np.integer\n ):\n raise lt.TileDBError(\"Passed in enumeration must be integer type\")\n\n return Enumeration.from_pybind11(self._ctx, super().extend(values))\n\n def __eq__(self, other):\n if not isinstance(other, Enumeration):\n return False\n\n return all(\n [\n self.name == other.name,\n self.dtype == other.dtype,\n self.cell_val_num == other.cell_val_num,\n self.ordered == other.ordered,\n np.array_equal(self.values(), other.values()),\n ]\n )\n\n def __repr__(self):\n # use safe repr if pybind11 constructor failed\n if self._ctx is None:\n return object.__repr__(self)\n\n return f\"Enumeration(name='{self.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})\"\n\n def _repr_html_(self):\n output = io.StringIO()\n\n output.write(\"<table>\")\n output.write(\"<tr>\")\n output.write(\"<th>Name</th>\")\n output.write(\"<th>Data Type</th>\")\n output.write(\"<th>Ordered</th>\")\n output.write(\"</tr>\")\n output.write(f\"{self._repr_html_row_only_()}\")\n output.write(\"</table>\")\n\n return output.getvalue()\n\n def _repr_html_row_only_(self):\n output = io.StringIO()\n\n output.write(\"<tr>\")\n output.write(f\"<td>{self.name}</td>\")\n output.write(f\"<td>{self.dtype}</td>\")\n output.write(f\"<td>{self.cell_val_num}</td>\")\n output.write(f\"<td>{self.ordered}</td>\")\n output.write(\"</tr>\")\n\n return output.getvalue()\n", "path": "tiledb/enumeration.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport io\nfrom typing import Any, Optional, Sequence\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nimport tiledb.cc as lt\n\nfrom .ctx import Ctx, CtxMixin\nfrom .datatypes import DataType\n\n\nclass Enumeration(CtxMixin, lt.Enumeration):\n \"\"\"\n Represents a TileDB Enumeration.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n ordered: bool,\n values: Optional[Sequence[Any]] = None,\n dtype: Optional[np.dtype] = None,\n ctx: Optional[Ctx] = None,\n ):\n \"\"\"Class representing the TileDB Enumeration.\n\n :param name: The name of the to-be created Enumeration\n :type name: str\n :param ordered: Whether or not to consider this enumeration ordered\n :type ordered: bool\n :param values: A Numpy array of values for this enumeration\n :type values: np.array\n :param dtype: The Numpy data type for this enumeration\n :type dtype: np.dtype\n :param ctx: A TileDB context\n :type ctx: tiledb.Ctx\n \"\"\"\n if values is None or len(values) == 0:\n if dtype is None:\n raise ValueError(\"dtype must be provied for empty enumeration\")\n super().__init__(ctx, name, np.dtype(dtype), ordered)\n\n values = np.array(values)\n if np.dtype(values.dtype).kind in \"US\":\n dtype = (\n lt.DataType.STRING_UTF8\n if values.dtype.kind == \"U\"\n else lt.DataType.STRING_ASCII\n )\n super().__init__(ctx, name, values, ordered, dtype)\n else:\n super().__init__(ctx, name, ordered, values, np.array([]))\n\n @property\n def name(self) -> str:\n 
\"\"\"The enumeration label string.\n\n :rtype: str\n \"\"\"\n return super().name\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"Numpy dtype representation of the enumeration type.\n\n :rtype: numpy.dtype\n \"\"\"\n return DataType.from_tiledb(super().type).np_dtype\n\n @property\n def cell_val_num(self) -> int:\n \"\"\"The enumeration's cell value number.\n\n :rtype: int\n \"\"\"\n return super().cell_val_num\n\n @property\n def ordered(self) -> bool:\n \"\"\"True if the enumeration is ordered.\n\n :rtype: bool\n \"\"\"\n return super().ordered\n\n def values(self) -> NDArray:\n \"\"\"The values of the enumeration.\n\n :rtype: NDArray\n \"\"\"\n if self.dtype.kind == \"U\":\n return np.array(super().str_values(), dtype=np.str_)\n elif self.dtype.kind == \"S\":\n return np.array(super().str_values(), dtype=np.bytes_)\n else:\n return np.array(super().values(), dtype=self.dtype)\n\n def extend(self, values: Sequence[Any]) -> Enumeration:\n \"\"\"Add additional values to the enumeration.\n\n :rtype: Enumeration\n \"\"\"\n values = np.array(values)\n if self.dtype.kind in \"US\" and values.dtype.kind not in \"US\":\n raise lt.TileDBError(\"Passed in enumeration must be string type\")\n\n if np.issubdtype(self.dtype, np.integer) and not np.issubdtype(\n values.dtype, np.integer\n ):\n raise lt.TileDBError(\"Passed in enumeration must be integer type\")\n\n return Enumeration.from_pybind11(self._ctx, super().extend(values))\n\n def __eq__(self, other):\n if not isinstance(other, Enumeration):\n return False\n\n return all(\n [\n self.name == other.name,\n self.dtype == other.dtype,\n self.cell_val_num == other.cell_val_num,\n self.ordered == other.ordered,\n np.array_equal(self.values(), other.values()),\n ]\n )\n\n def __repr__(self):\n # use safe repr if pybind11 constructor failed\n if self._ctx is None:\n return object.__repr__(self)\n\n return f\"Enumeration(name='{self.name}', dtype={self.dtype}, dtype_name='{self.dtype.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})\"\n\n def _repr_html_(self):\n output = io.StringIO()\n\n output.write(\"<table>\")\n output.write(\"<tr>\")\n output.write(\"<th>Name</th>\")\n output.write(\"<th>Data Type</th>\")\n output.write(\"<th>Ordered</th>\")\n output.write(\"</tr>\")\n output.write(f\"{self._repr_html_row_only_()}\")\n output.write(\"</table>\")\n\n return output.getvalue()\n\n def _repr_html_row_only_(self):\n output = io.StringIO()\n\n output.write(\"<tr>\")\n output.write(f\"<td>{self.name}</td>\")\n output.write(f\"<td>{self.dtype}</td>\")\n output.write(f\"<td>{self.cell_val_num}</td>\")\n output.write(f\"<td>{self.ordered}</td>\")\n output.write(\"</tr>\")\n\n return output.getvalue()\n", "path": "tiledb/enumeration.py"}]} | 1,877 | 253 |
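As a quick illustration of the behavior the TileDB issue above asks for, here is a minimal sketch (the array path comes from the issue repro and is assumed to be readable) showing how the value dtype can be inspected today and what the patched `__repr__` additionally reports:

```python
import tiledb

A = tiledb.open("/var/p/obs")      # path taken from the issue repro
enmr = A.enum("louvain")
print(enmr.dtype)                  # e.g. dtype('<U1')
print(enmr.dtype.name)             # e.g. 'str32'
# With the golden diff applied, repr(enmr) also includes dtype=... and dtype_name='...'
print(repr(enmr))
A.close()
```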
gh_patches_debug_15455 | rasdani/github-patches | git_diff | kubeflow__pipelines-9088 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
v2 - support resource requests and limits
* [x] https://github.com/kubeflow/pipelines/pull/7045
* [x] #7043
* [x] #7047
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samples/core/resource_spec/resource_spec_v2.py`
Content:
```
1 # Copyright 2020-2021 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 from kfp import dsl
17
18 # In tests, we install a KFP package from the PR under test. Users should not
19 # normally need to specify `kfp_package_path` in their component definitions.
20 _KFP_PACKAGE_PATH = os.getenv('KFP_PACKAGE_PATH')
21
22
23 @dsl.component(kfp_package_path=_KFP_PACKAGE_PATH)
24 def training_op(n: int) -> int:
25 # quickly allocate a lot of memory to verify memory is enough
26 a = [i for i in range(n)]
27 return len(a)
28
29
30 @dsl.pipeline(
31 name='pipeline-with-resource-spec',
32 description='A pipeline with resource specification.')
33 def my_pipeline(n: int = 11234567):
34 # For units of these resource limits,
35 # refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes
36 # 11234567 roughly needs 400Mi+ memory.
37 #
38 # Note, with v2 python components, there's a larger memory overhead caused
39 # by installing KFP SDK in the component, so we had to increase memory limit to 650M.
40 training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')
41
42 # TODO(Bobgy): other resource specs like cpu requests, memory requests and
43 # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.
44 # There are other resource spec you can set.
45 # For example, to use TPU, add the following:
46 # .add_node_selector_constraint('cloud.google.com/gke-accelerator', 'tpu-v3')
47 # .set_gpu_limit(1)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/samples/core/resource_spec/resource_spec_v2.py b/samples/core/resource_spec/resource_spec_v2.py
--- a/samples/core/resource_spec/resource_spec_v2.py
+++ b/samples/core/resource_spec/resource_spec_v2.py
@@ -38,6 +38,9 @@
# Note, with v2 python components, there's a larger memory overhead caused
# by installing KFP SDK in the component, so we had to increase memory limit to 650M.
training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')
+
+ # TODO(gkcalat): enable requests once SDK implements the feature
+ # training_task = training_task.set_cpu_request('1').set_memory_request('650M')
# TODO(Bobgy): other resource specs like cpu requests, memory requests and
# GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.
| {"golden_diff": "diff --git a/samples/core/resource_spec/resource_spec_v2.py b/samples/core/resource_spec/resource_spec_v2.py\n--- a/samples/core/resource_spec/resource_spec_v2.py\n+++ b/samples/core/resource_spec/resource_spec_v2.py\n@@ -38,6 +38,9 @@\n # Note, with v2 python components, there's a larger memory overhead caused\n # by installing KFP SDK in the component, so we had to increase memory limit to 650M.\n training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')\n+ \n+ # TODO(gkcalat): enable requests once SDK implements the feature\n+ # training_task = training_task.set_cpu_request('1').set_memory_request('650M')\n \n # TODO(Bobgy): other resource specs like cpu requests, memory requests and\n # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.\n", "issue": "v2 - support resource requests and limits\n* [x] https://github.com/kubeflow/pipelines/pull/7045\r\n* [x] #7043\r\n* [x] #7047\r\n\n", "before_files": [{"content": "# Copyright 2020-2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom kfp import dsl\n\n# In tests, we install a KFP package from the PR under test. Users should not\n# normally need to specify `kfp_package_path` in their component definitions.\n_KFP_PACKAGE_PATH = os.getenv('KFP_PACKAGE_PATH')\n\n\[email protected](kfp_package_path=_KFP_PACKAGE_PATH)\ndef training_op(n: int) -> int:\n # quickly allocate a lot of memory to verify memory is enough\n a = [i for i in range(n)]\n return len(a)\n\n\[email protected](\n name='pipeline-with-resource-spec',\n description='A pipeline with resource specification.')\ndef my_pipeline(n: int = 11234567):\n # For units of these resource limits,\n # refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes\n # 11234567 roughly needs 400Mi+ memory.\n #\n # Note, with v2 python components, there's a larger memory overhead caused\n # by installing KFP SDK in the component, so we had to increase memory limit to 650M.\n training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')\n\n # TODO(Bobgy): other resource specs like cpu requests, memory requests and\n # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.\n # There are other resource spec you can set.\n # For example, to use TPU, add the following:\n # .add_node_selector_constraint('cloud.google.com/gke-accelerator', 'tpu-v3')\n # .set_gpu_limit(1)\n", "path": "samples/core/resource_spec/resource_spec_v2.py"}], "after_files": [{"content": "# Copyright 2020-2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES 
OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom kfp import dsl\n\n# In tests, we install a KFP package from the PR under test. Users should not\n# normally need to specify `kfp_package_path` in their component definitions.\n_KFP_PACKAGE_PATH = os.getenv('KFP_PACKAGE_PATH')\n\n\[email protected](kfp_package_path=_KFP_PACKAGE_PATH)\ndef training_op(n: int) -> int:\n # quickly allocate a lot of memory to verify memory is enough\n a = [i for i in range(n)]\n return len(a)\n\n\[email protected](\n name='pipeline-with-resource-spec',\n description='A pipeline with resource specification.')\ndef my_pipeline(n: int = 11234567):\n # For units of these resource limits,\n # refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes\n # 11234567 roughly needs 400Mi+ memory.\n #\n # Note, with v2 python components, there's a larger memory overhead caused\n # by installing KFP SDK in the component, so we had to increase memory limit to 650M.\n training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')\n \n # TODO(gkcalat): enable requests once SDK implements the feature\n # training_task = training_task.set_cpu_request('1').set_memory_request('650M')\n\n # TODO(Bobgy): other resource specs like cpu requests, memory requests and\n # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.\n # There are other resource spec you can set.\n # For example, to use TPU, add the following:\n # .add_node_selector_constraint('cloud.google.com/gke-accelerator', 'tpu-v3')\n # .set_gpu_limit(1)\n", "path": "samples/core/resource_spec/resource_spec_v2.py"}]} | 923 | 218 |
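For the resource-spec sample above, a minimal KFP v2 sketch of the same pattern; the request setters stay commented out because, as the sample's TODO notes, the SDK had not implemented them yet:

```python
from kfp import dsl

@dsl.component
def training_op(n: int) -> int:
    # quickly allocate memory to verify the limit is sufficient
    return len([i for i in range(n)])

@dsl.pipeline(name="resource-spec-demo")
def my_pipeline(n: int = 11234567):
    task = training_op(n=n).set_cpu_limit("1").set_memory_limit("650M")
    # Once the SDK supports requests (see the TODO above):
    # task = task.set_cpu_request("1").set_memory_request("650M")
```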
gh_patches_debug_33968 | rasdani/github-patches | git_diff | pypa__pip-2281 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip 6.0.3 weird symbols shown with download progress bar
With pip 6.0.3 on Windows using cmd.exe:
```
py -mpip install --upgrade setuptools
Collecting setuptools from https://pypi.python.org/packages/3.4/s/setuptools/set
uptools-8.3-py2.py3-none-any.whl#md5=a6c2914e2ae62227a5dfb6e908475b02
Downloading setuptools-8.3-py2.py3-none-any.whl (552kB)
←[K 100% |################################| 552kB 835kB/s ta 0:00:01
←[?25hInstalling collected packages: setuptools
Found existing installation: setuptools 7.0
Uninstalling setuptools-7.0:
Successfully uninstalled setuptools-7.0
Successfully installed setuptools-8.3
```
There's weird stuff with the progress bar, possibly control characters that the cmd.exe terminal can't handle
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/utils/ui.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import itertools
5 import sys
6
7 from pip.utils import format_size
8 from pip.utils.logging import get_indentation
9 from pip._vendor.progress.bar import Bar
10 from pip._vendor.progress.helpers import WritelnMixin
11 from pip._vendor.progress.spinner import Spinner
12
13
14 class DownloadProgressMixin(object):
15
16 def __init__(self, *args, **kwargs):
17 super(DownloadProgressMixin, self).__init__(*args, **kwargs)
18 self.message = (" " * (get_indentation() + 2)) + self.message
19
20 @property
21 def downloaded(self):
22 return format_size(self.index)
23
24 @property
25 def download_speed(self):
26 # Avoid zero division errors...
27 if self.avg == 0.0:
28 return "..."
29 return format_size(1 / self.avg) + "/s"
30
31 @property
32 def pretty_eta(self):
33 if self.eta:
34 return "eta %s" % self.eta_td
35 return ""
36
37 def iter(self, it, n=1):
38 for x in it:
39 yield x
40 self.next(n)
41 self.finish()
42
43
44 class DownloadProgressBar(DownloadProgressMixin, Bar):
45
46 file = sys.stdout
47 message = "%(percent)d%%"
48 suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
49
50
51 class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):
52
53 file = sys.stdout
54 suffix = "%(downloaded)s %(download_speed)s"
55
56 def next_phase(self):
57 if not hasattr(self, "_phaser"):
58 self._phaser = itertools.cycle(self.phases)
59 return next(self._phaser)
60
61 def update(self):
62 message = self.message % self
63 phase = self.next_phase()
64 suffix = self.suffix % self
65 line = ''.join([
66 message,
67 " " if message else "",
68 phase,
69 " " if suffix else "",
70 suffix,
71 ])
72
73 self.writeln(line)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/utils/ui.py b/pip/utils/ui.py
--- a/pip/utils/ui.py
+++ b/pip/utils/ui.py
@@ -4,12 +4,20 @@
import itertools
import sys
+from pip.compat import WINDOWS
from pip.utils import format_size
from pip.utils.logging import get_indentation
from pip._vendor.progress.bar import Bar
from pip._vendor.progress.helpers import WritelnMixin
from pip._vendor.progress.spinner import Spinner
+try:
+ from pip._vendor import colorama
+# Lots of different errors can come from this, including SystemError and
+# ImportError.
+except Exception:
+ colorama = None
+
class DownloadProgressMixin(object):
@@ -41,14 +49,40 @@
self.finish()
-class DownloadProgressBar(DownloadProgressMixin, Bar):
+class WindowsMixin(object):
+
+ def __init__(self, *args, **kwargs):
+ super(WindowsMixin, self).__init__(*args, **kwargs)
+
+ # Check if we are running on Windows and we have the colorama module,
+ # if we do then wrap our file with it.
+ if WINDOWS and colorama:
+ self.file = colorama.AnsiToWin32(self.file)
+ # The progress code expects to be able to call self.file.isatty()
+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+ # add it.
+ self.file.isatty = lambda: self.file.wrapped.isatty()
+ # The progress code expects to be able to call self.file.flush()
+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+ # add it.
+ self.file.flush = lambda: self.file.wrapped.flush()
+
+ # The Windows terminal does not support the hide/show cursor ANSI codes
+ # even with colorama. So we'll ensure that hide_cursor is False on
+ # Windows.
+ if WINDOWS and self.hide_cursor:
+ self.hide_cursor = False
+
+
+class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, Bar):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
-class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):
+class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin,
+ WritelnMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
| {"golden_diff": "diff --git a/pip/utils/ui.py b/pip/utils/ui.py\n--- a/pip/utils/ui.py\n+++ b/pip/utils/ui.py\n@@ -4,12 +4,20 @@\n import itertools\n import sys\n \n+from pip.compat import WINDOWS\n from pip.utils import format_size\n from pip.utils.logging import get_indentation\n from pip._vendor.progress.bar import Bar\n from pip._vendor.progress.helpers import WritelnMixin\n from pip._vendor.progress.spinner import Spinner\n \n+try:\n+ from pip._vendor import colorama\n+# Lots of different errors can come from this, including SystemError and\n+# ImportError.\n+except Exception:\n+ colorama = None\n+\n \n class DownloadProgressMixin(object):\n \n@@ -41,14 +49,40 @@\n self.finish()\n \n \n-class DownloadProgressBar(DownloadProgressMixin, Bar):\n+class WindowsMixin(object):\n+\n+ def __init__(self, *args, **kwargs):\n+ super(WindowsMixin, self).__init__(*args, **kwargs)\n+\n+ # Check if we are running on Windows and we have the colorama module,\n+ # if we do then wrap our file with it.\n+ if WINDOWS and colorama:\n+ self.file = colorama.AnsiToWin32(self.file)\n+ # The progress code expects to be able to call self.file.isatty()\n+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n+ # add it.\n+ self.file.isatty = lambda: self.file.wrapped.isatty()\n+ # The progress code expects to be able to call self.file.flush()\n+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n+ # add it.\n+ self.file.flush = lambda: self.file.wrapped.flush()\n+\n+ # The Windows terminal does not support the hide/show cursor ANSI codes\n+ # even with colorama. So we'll ensure that hide_cursor is False on\n+ # Windows.\n+ if WINDOWS and self.hide_cursor:\n+ self.hide_cursor = False\n+\n+\n+class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, Bar):\n \n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n \n \n-class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):\n+class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin,\n+ WritelnMixin, Spinner):\n \n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n", "issue": "pip 6.0.3 weird symbols shown with download progress bar\nwith pip 6.0.3 on Windows with cmd.exe\n\n```\npy -mpip install --upgrade setuptools\n\nCollecting setuptools from https://pypi.python.org/packages/3.4/s/setuptools/set\nuptools-8.3-py2.py3-none-any.whl#md5=a6c2914e2ae62227a5dfb6e908475b02\n Downloading setuptools-8.3-py2.py3-none-any.whl (552kB)\n\u2190[K 100% |################################| 552kB 835kB/s ta 0:00:01\n\u2190[?25hInstalling collected packages: setuptools\n Found existing installation: setuptools 7.0\n Uninstalling setuptools-7.0:\n Successfully uninstalled setuptools-7.0\n\nSuccessfully installed setuptools-8.3\n```\n\nThere's weird stuff with the progress bar, possibly control characers that cmd.exe terminal can't handle\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport itertools\nimport sys\n\nfrom pip.utils import format_size\nfrom pip.utils.logging import get_indentation\nfrom pip._vendor.progress.bar import Bar\nfrom pip._vendor.progress.helpers import WritelnMixin\nfrom pip._vendor.progress.spinner import Spinner\n\n\nclass DownloadProgressMixin(object):\n\n def __init__(self, *args, **kwargs):\n super(DownloadProgressMixin, self).__init__(*args, **kwargs)\n self.message = (\" \" * (get_indentation() + 2)) + self.message\n\n @property\n def 
downloaded(self):\n return format_size(self.index)\n\n @property\n def download_speed(self):\n # Avoid zero division errors...\n if self.avg == 0.0:\n return \"...\"\n return format_size(1 / self.avg) + \"/s\"\n\n @property\n def pretty_eta(self):\n if self.eta:\n return \"eta %s\" % self.eta_td\n return \"\"\n\n def iter(self, it, n=1):\n for x in it:\n yield x\n self.next(n)\n self.finish()\n\n\nclass DownloadProgressBar(DownloadProgressMixin, Bar):\n\n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n\n\nclass DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):\n\n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n\n def next_phase(self):\n if not hasattr(self, \"_phaser\"):\n self._phaser = itertools.cycle(self.phases)\n return next(self._phaser)\n\n def update(self):\n message = self.message % self\n phase = self.next_phase()\n suffix = self.suffix % self\n line = ''.join([\n message,\n \" \" if message else \"\",\n phase,\n \" \" if suffix else \"\",\n suffix,\n ])\n\n self.writeln(line)\n", "path": "pip/utils/ui.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport itertools\nimport sys\n\nfrom pip.compat import WINDOWS\nfrom pip.utils import format_size\nfrom pip.utils.logging import get_indentation\nfrom pip._vendor.progress.bar import Bar\nfrom pip._vendor.progress.helpers import WritelnMixin\nfrom pip._vendor.progress.spinner import Spinner\n\ntry:\n from pip._vendor import colorama\n# Lots of different errors can come from this, including SystemError and\n# ImportError.\nexcept Exception:\n colorama = None\n\n\nclass DownloadProgressMixin(object):\n\n def __init__(self, *args, **kwargs):\n super(DownloadProgressMixin, self).__init__(*args, **kwargs)\n self.message = (\" \" * (get_indentation() + 2)) + self.message\n\n @property\n def downloaded(self):\n return format_size(self.index)\n\n @property\n def download_speed(self):\n # Avoid zero division errors...\n if self.avg == 0.0:\n return \"...\"\n return format_size(1 / self.avg) + \"/s\"\n\n @property\n def pretty_eta(self):\n if self.eta:\n return \"eta %s\" % self.eta_td\n return \"\"\n\n def iter(self, it, n=1):\n for x in it:\n yield x\n self.next(n)\n self.finish()\n\n\nclass WindowsMixin(object):\n\n def __init__(self, *args, **kwargs):\n super(WindowsMixin, self).__init__(*args, **kwargs)\n\n # Check if we are running on Windows and we have the colorama module,\n # if we do then wrap our file with it.\n if WINDOWS and colorama:\n self.file = colorama.AnsiToWin32(self.file)\n # The progress code expects to be able to call self.file.isatty()\n # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n # add it.\n self.file.isatty = lambda: self.file.wrapped.isatty()\n # The progress code expects to be able to call self.file.flush()\n # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n # add it.\n self.file.flush = lambda: self.file.wrapped.flush()\n\n # The Windows terminal does not support the hide/show cursor ANSI codes\n # even with colorama. 
So we'll ensure that hide_cursor is False on\n # Windows.\n if WINDOWS and self.hide_cursor:\n self.hide_cursor = False\n\n\nclass DownloadProgressBar(WindowsMixin, DownloadProgressMixin, Bar):\n\n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n\n\nclass DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin,\n WritelnMixin, Spinner):\n\n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n\n def next_phase(self):\n if not hasattr(self, \"_phaser\"):\n self._phaser = itertools.cycle(self.phases)\n return next(self._phaser)\n\n def update(self):\n message = self.message % self\n phase = self.next_phase()\n suffix = self.suffix % self\n line = ''.join([\n message,\n \" \" if message else \"\",\n phase,\n \" \" if suffix else \"\",\n suffix,\n ])\n\n self.writeln(line)\n", "path": "pip/utils/ui.py"}]} | 1,074 | 565 |
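The heart of the `WindowsMixin` fix above, reduced to a standalone sketch: on Windows, wrap the output stream in colorama's `AnsiToWin32` so the progress bar's ANSI escape codes are translated instead of printed literally; the `isatty`/`flush` patching mirrors the diff:

```python
import sys

try:
    import colorama
except Exception:
    colorama = None

stream = sys.stdout
if sys.platform == "win32" and colorama is not None:
    stream = colorama.AnsiToWin32(stream)
    # The progress code expects isatty() and flush(); delegate to the wrapped stream.
    stream.isatty = lambda: stream.wrapped.isatty()
    stream.flush = lambda: stream.wrapped.flush()
```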
gh_patches_debug_17125 | rasdani/github-patches | git_diff | spack__spack-18458 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issue: r-boot
<!-- Thanks for taking the time to report this build failure. To proceed with the report please:
1. Title the issue "Installation issue: <name-of-the-package>".
2. Provide the information required below.
We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->
### Steps to reproduce the issue
<!-- Fill in the exact spec you are trying to build and the relevant part of the error message -->
```console
$ spack install r-boot%fj
==> Error: ChecksumError: sha256 checksum failed for /home/users/ea01/ea0114/spack-stage/spack-stage-r-boot-1.3-23-mm6cmoaof62r5y527kz24snjifgwpir6/boot_1.3-23.tar.gz
Expected 30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f but got 79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f
```
A build of `r-boot%fj` in August 2020 failed with a checksum error.
This version was added to spack on 31 Aug 2019. Please see https://github.com/spack/spack/commit/661a894c85f451a4ef868abcc9871653914361bd
According to our log, the same build succeeded in October 2019.
https://cloud.r-project.org/src/contrib/boot_1.3-23.tar.gz seems to have changed between these attempts.
We found the old (30c89e19) boot_1.3-23.tar.gz at http://in.archive.ubuntu.com/pool/universe/b/boot/boot_1.3-23.orig.tar.gz
and compared it with the new (79236a5a) one.
The difference was trivial. ("Date/Publication" in boot/DESCRIPTION, and the MD5 of that file in boot/MD5)
So I would like to update the checksum value.

We have another question.
In this case, we found the "old" archive and proved the difference is trivial.
If we find a checksum mismatch and cannot find an "old" archive to verify against,
which is better from a security point of view?
1. create issue and discuss
2. directly make PR
<!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. -->
### General information
<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [ ] I have uploaded the build log and environment files
- [x] I have searched the issues of this repo and believe this is not a duplicate
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/r-boot/package.py`
Content:
```
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class RBoot(RPackage):
10 """Functions and datasets for bootstrapping from the book "Bootstrap
11 Methods and Their Application" by A. C. Davison and D. V. Hinkley (1997,
12 CUP), originally written by Angelo Canty for S."""
13
14 homepage = "https://cloud.r-project.org/package=boot"
15 url = "https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz"
16 list_url = "https://cloud.r-project.org/src/contrib/Archive/boot"
17
18 version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')
19 version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')
20 version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')
21 version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')
22
23 depends_on('[email protected]:', type=('build', 'run'))
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/r-boot/package.py b/var/spack/repos/builtin/packages/r-boot/package.py
--- a/var/spack/repos/builtin/packages/r-boot/package.py
+++ b/var/spack/repos/builtin/packages/r-boot/package.py
@@ -15,7 +15,7 @@
url = "https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/boot"
- version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')
+ version('1.3-23', sha256='79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f')
version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')
version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')
version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/r-boot/package.py b/var/spack/repos/builtin/packages/r-boot/package.py\n--- a/var/spack/repos/builtin/packages/r-boot/package.py\n+++ b/var/spack/repos/builtin/packages/r-boot/package.py\n@@ -15,7 +15,7 @@\n url = \"https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz\"\n list_url = \"https://cloud.r-project.org/src/contrib/Archive/boot\"\n \n- version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')\n+ version('1.3-23', sha256='79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f')\n version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')\n version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')\n version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')\n", "issue": "Installation issue: r-boot\n<!-- Thanks for taking the time to report this build failure. To proceed with the report please:\r\n\r\n1. Title the issue \"Installation issue: <name-of-the-package>\".\r\n2. Provide the information required below.\r\n\r\nWe encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->\r\n\r\n### Steps to reproduce the issue\r\n\r\n<!-- Fill in the exact spec you are trying to build and the relevant part of the error message -->\r\n```console\r\n$ spack install r-boot%fj\r\n==> Error: ChecksumError: sha256 checksum failed for /home/users/ea01/ea0114/spack-stage/spack-stage-r-boot-1.3-23-mm6cmoaof62r5y527kz24snjifgwpir6/boot_1.3-23.tar.gz\r\n Expected 30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f but got 79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f\r\n```\r\n\r\nBuild of `r-boot%fj` on 2020 Aug has a checksum error.\r\nThis version added to spack at 31 Aug 2019. Please see https://github.com/spack/spack/commit/661a894c85f451a4ef868abcc9871653914361bd\r\nAccording to our log, same build succeeded on 2019 Oct. \r\n\r\nhttps://cloud.r-project.org/src/contrib/boot_1.3-23.tar.gz seems to be changed between these attempts.\r\nWe found old(30c89e19) boot_1.3-23.tar.gz from http://in.archive.ubuntu.com/pool/universe/b/boot/boot_1.3-23.orig.tar.gz\r\nand compared with new(79236a5a) one.\r\nDifference was tribial. (\"Date/Publication\" in boot/DESCRIPTION, and MD5 of the file in boot/MD5)\r\nSo I would like to update checksum value.\r\n\r\nWe have another question.\r\nIn this case, we found \"old\" archive and proof the differnce is trivial.\r\nIf we found checksum mismatch and could not find \"old\" archive to verify, \r\nwhich is better in view of security?\r\n1. create issue and discuss\r\n2. directly make PR\r\n\r\n<!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. -->\r\n\r\n### General information\r\n\r\n<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. 
-->\r\n- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers\r\n- [ ] I have uploaded the build log and environment files\r\n- [x] I have searched the issues of this repo and believe this is not a duplicate\r\n\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass RBoot(RPackage):\n \"\"\"Functions and datasets for bootstrapping from the book \"Bootstrap\n Methods and Their Application\" by A. C. Davison and D. V. Hinkley (1997,\n CUP), originally written by Angelo Canty for S.\"\"\"\n\n homepage = \"https://cloud.r-project.org/package=boot\"\n url = \"https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz\"\n list_url = \"https://cloud.r-project.org/src/contrib/Archive/boot\"\n\n version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')\n version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')\n version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')\n version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')\n\n depends_on('[email protected]:', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/r-boot/package.py"}], "after_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass RBoot(RPackage):\n \"\"\"Functions and datasets for bootstrapping from the book \"Bootstrap\n Methods and Their Application\" by A. C. Davison and D. V. Hinkley (1997,\n CUP), originally written by Angelo Canty for S.\"\"\"\n\n homepage = \"https://cloud.r-project.org/package=boot\"\n url = \"https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz\"\n list_url = \"https://cloud.r-project.org/src/contrib/Archive/boot\"\n\n version('1.3-23', sha256='79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f')\n version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')\n version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')\n version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')\n\n depends_on('[email protected]:', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/r-boot/package.py"}]} | 1,551 | 471 |
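When a mismatch like the r-boot one is reported, a minimal sketch for confirming which sha256 a locally downloaded tarball really has (the file name is a placeholder):

```python
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("boot_1.3-23.tar.gz"))
# old Ubuntu copy   -> 30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f
# current CRAN copy -> 79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f
```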
gh_patches_debug_19842 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't crash when a nomcom is partially set up.
Right now, if a nomcom group is created, and the associated NomCom object is not, anyone with a role in the nomcom group cannot use the datatracker - the construction of the menu crashes.
The places that crash need to be protected against this misconfiguration (especially while the configuration of the Group and NomCom objects are done manually).
See also https://github.com/ietf-tools/datatracker/issues/3289
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/group/templatetags/group_filters.py`
Content:
```
1 from django import template
2
3 import debug # pyflakes:ignore
4
5 from ietf.group.models import Group
6
7 register = template.Library()
8
9 @register.filter
10 def has_sessions(group,num):
11 return group.session_set.filter(meeting__number=num).exists()
12
13 @register.filter
14 def active_roles(queryset):
15 return queryset.filter(group__state_id__in=['active', 'bof']).exclude(group__acronym='secretariat')
16
17 @register.filter
18 def active_nomcoms(user):
19 if not (user and hasattr(user, "is_authenticated") and user.is_authenticated):
20 return []
21
22 groups = []
23
24 groups.extend(Group.objects.filter(
25 role__person__user=user,
26 type_id='nomcom',
27 state__slug='active').distinct().select_related("type"))
28
29 return groups
30
31 @register.inclusion_tag('person/person_link.html')
32 def role_person_link(role, **kwargs):
33 title = kwargs.get('title', '')
34 cls = kwargs.get('class', '')
35 name = role.person.name
36 plain_name = role.person.plain_name()
37 email = role.email.address
38 return {'name': name, 'plain_name': plain_name, 'email': email, 'title': title, 'class': cls}
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ietf/group/templatetags/group_filters.py b/ietf/group/templatetags/group_filters.py
--- a/ietf/group/templatetags/group_filters.py
+++ b/ietf/group/templatetags/group_filters.py
@@ -2,7 +2,7 @@
import debug # pyflakes:ignore
-from ietf.group.models import Group
+from ietf.nomcom.models import NomCom
register = template.Library()
@@ -19,14 +19,15 @@
if not (user and hasattr(user, "is_authenticated") and user.is_authenticated):
return []
- groups = []
-
- groups.extend(Group.objects.filter(
- role__person__user=user,
- type_id='nomcom',
- state__slug='active').distinct().select_related("type"))
-
- return groups
+ return list(
+ NomCom.objects.filter(
+ group__role__person__user=user,
+ group__type_id='nomcom', # just in case...
+ group__state__slug='active',
+ )
+ .distinct()
+ .order_by("group__acronym")
+ )
@register.inclusion_tag('person/person_link.html')
def role_person_link(role, **kwargs):
| {"golden_diff": "diff --git a/ietf/group/templatetags/group_filters.py b/ietf/group/templatetags/group_filters.py\n--- a/ietf/group/templatetags/group_filters.py\n+++ b/ietf/group/templatetags/group_filters.py\n@@ -2,7 +2,7 @@\n \n import debug # pyflakes:ignore\n \n-from ietf.group.models import Group\n+from ietf.nomcom.models import NomCom\n \n register = template.Library()\n \n@@ -19,14 +19,15 @@\n if not (user and hasattr(user, \"is_authenticated\") and user.is_authenticated):\n return []\n \n- groups = []\n-\n- groups.extend(Group.objects.filter(\n- role__person__user=user,\n- type_id='nomcom',\n- state__slug='active').distinct().select_related(\"type\"))\n-\n- return groups\n+ return list(\n+ NomCom.objects.filter(\n+ group__role__person__user=user,\n+ group__type_id='nomcom', # just in case...\n+ group__state__slug='active',\n+ )\n+ .distinct()\n+ .order_by(\"group__acronym\")\n+ )\n \n @register.inclusion_tag('person/person_link.html')\n def role_person_link(role, **kwargs):\n", "issue": "Don't crash when a nomcom is partially set up.\nRight now, if a nomcom group is created, and the associated NomCom object is not, anyone with a role in the nomcom group cannot use the datatracker - the construction of the menu crashes.\r\n\r\nThe places that crash need to be protected against this misconfiguration (especially while the configuration of the Group and NomCom objects are done manually).\r\n\r\nSee also https://github.com/ietf-tools/datatracker/issues/3289\n", "before_files": [{"content": "from django import template\n\nimport debug # pyflakes:ignore\n\nfrom ietf.group.models import Group\n\nregister = template.Library()\n\[email protected]\ndef has_sessions(group,num):\n return group.session_set.filter(meeting__number=num).exists()\n\[email protected]\ndef active_roles(queryset):\n return queryset.filter(group__state_id__in=['active', 'bof']).exclude(group__acronym='secretariat')\n \[email protected]\ndef active_nomcoms(user):\n if not (user and hasattr(user, \"is_authenticated\") and user.is_authenticated):\n return []\n\n groups = []\n\n groups.extend(Group.objects.filter(\n role__person__user=user,\n type_id='nomcom',\n state__slug='active').distinct().select_related(\"type\"))\n\n return groups\n\[email protected]_tag('person/person_link.html')\ndef role_person_link(role, **kwargs):\n title = kwargs.get('title', '')\n cls = kwargs.get('class', '')\n name = role.person.name\n plain_name = role.person.plain_name()\n email = role.email.address\n return {'name': name, 'plain_name': plain_name, 'email': email, 'title': title, 'class': cls}\n", "path": "ietf/group/templatetags/group_filters.py"}], "after_files": [{"content": "from django import template\n\nimport debug # pyflakes:ignore\n\nfrom ietf.nomcom.models import NomCom\n\nregister = template.Library()\n\[email protected]\ndef has_sessions(group,num):\n return group.session_set.filter(meeting__number=num).exists()\n\[email protected]\ndef active_roles(queryset):\n return queryset.filter(group__state_id__in=['active', 'bof']).exclude(group__acronym='secretariat')\n \[email protected]\ndef active_nomcoms(user):\n if not (user and hasattr(user, \"is_authenticated\") and user.is_authenticated):\n return []\n\n return list(\n NomCom.objects.filter(\n group__role__person__user=user,\n group__type_id='nomcom', # just in case...\n group__state__slug='active',\n )\n .distinct()\n .order_by(\"group__acronym\")\n )\n\[email protected]_tag('person/person_link.html')\ndef role_person_link(role, **kwargs):\n title = kwargs.get('title', 
'')\n cls = kwargs.get('class', '')\n name = role.person.name\n plain_name = role.person.plain_name()\n email = role.email.address\n return {'name': name, 'plain_name': plain_name, 'email': email, 'title': title, 'class': cls}\n", "path": "ietf/group/templatetags/group_filters.py"}]} | 702 | 284 |
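The defensive idea behind the nomcom patch above, as a standalone sketch: resolve a user's active nomcoms through the `NomCom` model itself, so a Group row that is missing its companion NomCom object never reaches the menu code (imports follow the patched template tag):

```python
from ietf.nomcom.models import NomCom

def active_nomcoms_for(user):
    if not getattr(user, "is_authenticated", False):
        return []
    return list(
        NomCom.objects.filter(
            group__role__person__user=user,
            group__state__slug="active",
        ).distinct()
    )
```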
gh_patches_debug_1139 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1150 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ServerException instead of ProxyServerError
##### Steps to reproduce the problem:
```
>>> from libmproxy.proxy.server import ProxyServer
>>> from libmproxy.proxy.config import ProxyConfig
>>> ProxyServer(ProxyConfig(port=80))
(...)
ServerException: Error starting proxy server: error(13, 'Permission denied')
```
##### What is the expected behavior?
According to the documentation:
```
>>> ProxyServer?
Type: type
String form: <class 'libmproxy.proxy.server.ProxyServer'>
File: /usr/lib/python2.7/dist-packages/libmproxy/proxy/server.py
Init definition: ProxyServer(self, config)
Docstring: <no docstring>
Init docstring: Raises ProxyServerError if there's a startup problem.
```
the expected behavior is
```
>>> ProxyServer(ProxyConfig(port=80))
(...)
ProxyServerError: Error starting proxy server: error(13, 'Permission denied')
```
##### What went wrong?
Maybe the documentation is wrong?
##### Any other comments?
Nope.
---
Mitmproxy Version: 0.15-2
Operating System: Debian Sid.
--- END ISSUE ---
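A minimal sketch of the mismatch this issue describes, following the repro's libmproxy 0.15 imports; the docstring promises ProxyServerError, but the constructor shown below re-raises ServerException, so a broad except is used here to avoid guessing the exceptions module path:

```python
from libmproxy.proxy.server import ProxyServer
from libmproxy.proxy.config import ProxyConfig

try:
    ProxyServer(ProxyConfig(port=80))   # port 80 normally needs elevated privileges
except Exception as e:
    # Expected per the docstring: ProxyServerError; actually raised: ServerException.
    print(type(e).__name__, e)
```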
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/proxy/server.py`
Content:
```
1 from __future__ import (absolute_import, print_function, division)
2
3 import traceback
4 import sys
5 import socket
6 import six
7
8 from netlib import tcp
9 from netlib.exceptions import TcpException
10 from netlib.http.http1 import assemble_response
11 from ..exceptions import ProtocolException, ServerException, ClientHandshakeException, Kill
12 from ..models import ClientConnection, make_error_response
13 from .modes import HttpUpstreamProxy, HttpProxy, ReverseProxy, TransparentProxy, Socks5Proxy
14 from .root_context import RootContext, Log
15
16
17 class DummyServer:
18 bound = False
19
20 def __init__(self, config):
21 self.config = config
22
23 def set_channel(self, channel):
24 pass
25
26 def serve_forever(self):
27 pass
28
29 def shutdown(self):
30 pass
31
32
33 class ProxyServer(tcp.TCPServer):
34 allow_reuse_address = True
35 bound = True
36
37 def __init__(self, config):
38 """
39 Raises ProxyServerError if there's a startup problem.
40 """
41 self.config = config
42 try:
43 super(ProxyServer, self).__init__((config.host, config.port))
44 except socket.error as e:
45 six.reraise(
46 ServerException,
47 ServerException('Error starting proxy server: ' + repr(e)),
48 sys.exc_info()[2]
49 )
50 self.channel = None
51
52 def set_channel(self, channel):
53 self.channel = channel
54
55 def handle_client_connection(self, conn, client_address):
56 h = ConnectionHandler(
57 conn,
58 client_address,
59 self.config,
60 self.channel
61 )
62 h.handle()
63
64
65 class ConnectionHandler(object):
66
67 def __init__(self, client_conn, client_address, config, channel):
68 self.config = config
69 """@type: mitmproxy.proxy.config.ProxyConfig"""
70 self.client_conn = ClientConnection(
71 client_conn,
72 client_address,
73 None)
74 """@type: mitmproxy.proxy.connection.ClientConnection"""
75 self.channel = channel
76 """@type: mitmproxy.controller.Channel"""
77
78 def _create_root_layer(self):
79 root_context = RootContext(
80 self.client_conn,
81 self.config,
82 self.channel
83 )
84
85 mode = self.config.mode
86 if mode == "upstream":
87 return HttpUpstreamProxy(
88 root_context,
89 self.config.upstream_server.address
90 )
91 elif mode == "transparent":
92 return TransparentProxy(root_context)
93 elif mode == "reverse":
94 server_tls = self.config.upstream_server.scheme == "https"
95 return ReverseProxy(
96 root_context,
97 self.config.upstream_server.address,
98 server_tls
99 )
100 elif mode == "socks5":
101 return Socks5Proxy(root_context)
102 elif mode == "regular":
103 return HttpProxy(root_context)
104 elif callable(mode): # pragma: no cover
105 return mode(root_context)
106 else: # pragma: no cover
107 raise ValueError("Unknown proxy mode: %s" % mode)
108
109 def handle(self):
110 self.log("clientconnect", "info")
111
112 root_layer = self._create_root_layer()
113
114 try:
115 root_layer = self.channel.ask("clientconnect", root_layer)
116 root_layer()
117 except Kill:
118 self.log("Connection killed", "info")
119 except ProtocolException as e:
120
121 if isinstance(e, ClientHandshakeException):
122 self.log(
123 "Client Handshake failed. "
124 "The client may not trust the proxy's certificate for {}.".format(e.server),
125 "error"
126 )
127 self.log(repr(e), "debug")
128 else:
129 self.log(repr(e), "info")
130
131 self.log(traceback.format_exc(), "debug")
132 # If an error propagates to the topmost level,
133 # we send an HTTP error response, which is both
134 # understandable by HTTP clients and humans.
135 try:
136 error_response = make_error_response(502, repr(e))
137 self.client_conn.send(assemble_response(error_response))
138 except TcpException:
139 pass
140 except Exception:
141 self.log(traceback.format_exc(), "error")
142 print(traceback.format_exc(), file=sys.stderr)
143 print("mitmproxy has crashed!", file=sys.stderr)
144 print("Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy", file=sys.stderr)
145
146 self.log("clientdisconnect", "info")
147 self.channel.tell("clientdisconnect", root_layer)
148 self.client_conn.finish()
149
150 def log(self, msg, level):
151 msg = "{}: {}".format(repr(self.client_conn.address), msg)
152 self.channel.tell("log", Log(msg, level))
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/proxy/server.py b/mitmproxy/proxy/server.py
--- a/mitmproxy/proxy/server.py
+++ b/mitmproxy/proxy/server.py
@@ -36,7 +36,7 @@
def __init__(self, config):
"""
- Raises ProxyServerError if there's a startup problem.
+ Raises ServerException if there's a startup problem.
"""
self.config = config
try:
| {"golden_diff": "diff --git a/mitmproxy/proxy/server.py b/mitmproxy/proxy/server.py\n--- a/mitmproxy/proxy/server.py\n+++ b/mitmproxy/proxy/server.py\n@@ -36,7 +36,7 @@\n \n def __init__(self, config):\n \"\"\"\n- Raises ProxyServerError if there's a startup problem.\n+ Raises ServerException if there's a startup problem.\n \"\"\"\n self.config = config\n try:\n", "issue": "ServerException instead of ProxyServerError\n##### Steps to reproduce the problem:\n\n```\n>>> from libmproxy.proxy.server import ProxyServer\n>>> from libmproxy.proxy.config import ProxyConfig\n>>> ProxyServer(ProxyConfig(port=80))\n(...)\nServerException: Error starting proxy server: error(13, 'Permission denied')\n```\n##### What is the expected behavior?\n\nAccording to the documentation:\n\n```\n>>> ProxyServer? \nType: type\nString form: <class 'libmproxy.proxy.server.ProxyServer'>\nFile: /usr/lib/python2.7/dist-packages/libmproxy/proxy/server.py\nInit definition: ProxyServer(self, config)\nDocstring: <no docstring>\nInit docstring: Raises ProxyServerError if there's a startup problem.\n```\n\nthe expected behavior is \n\n```\n>>> ProxyServer(ProxyConfig(port=80))\n(...)\nProxyServerError: Error starting proxy server: error(13, 'Permission denied')\n```\n##### What went wrong?\n\nMaybe the documentation is wrong?\n##### Any other comments?\n\nNope.\n\n---\n\nMitmproxy Version: 0.15-2\nOperating System: Debian Sid.\n\n", "before_files": [{"content": "from __future__ import (absolute_import, print_function, division)\n\nimport traceback\nimport sys\nimport socket\nimport six\n\nfrom netlib import tcp\nfrom netlib.exceptions import TcpException\nfrom netlib.http.http1 import assemble_response\nfrom ..exceptions import ProtocolException, ServerException, ClientHandshakeException, Kill\nfrom ..models import ClientConnection, make_error_response\nfrom .modes import HttpUpstreamProxy, HttpProxy, ReverseProxy, TransparentProxy, Socks5Proxy\nfrom .root_context import RootContext, Log\n\n\nclass DummyServer:\n bound = False\n\n def __init__(self, config):\n self.config = config\n\n def set_channel(self, channel):\n pass\n\n def serve_forever(self):\n pass\n\n def shutdown(self):\n pass\n\n\nclass ProxyServer(tcp.TCPServer):\n allow_reuse_address = True\n bound = True\n\n def __init__(self, config):\n \"\"\"\n Raises ProxyServerError if there's a startup problem.\n \"\"\"\n self.config = config\n try:\n super(ProxyServer, self).__init__((config.host, config.port))\n except socket.error as e:\n six.reraise(\n ServerException,\n ServerException('Error starting proxy server: ' + repr(e)),\n sys.exc_info()[2]\n )\n self.channel = None\n\n def set_channel(self, channel):\n self.channel = channel\n\n def handle_client_connection(self, conn, client_address):\n h = ConnectionHandler(\n conn,\n client_address,\n self.config,\n self.channel\n )\n h.handle()\n\n\nclass ConnectionHandler(object):\n\n def __init__(self, client_conn, client_address, config, channel):\n self.config = config\n \"\"\"@type: mitmproxy.proxy.config.ProxyConfig\"\"\"\n self.client_conn = ClientConnection(\n client_conn,\n client_address,\n None)\n \"\"\"@type: mitmproxy.proxy.connection.ClientConnection\"\"\"\n self.channel = channel\n \"\"\"@type: mitmproxy.controller.Channel\"\"\"\n\n def _create_root_layer(self):\n root_context = RootContext(\n self.client_conn,\n self.config,\n self.channel\n )\n\n mode = self.config.mode\n if mode == \"upstream\":\n return HttpUpstreamProxy(\n root_context,\n self.config.upstream_server.address\n )\n elif mode == 
\"transparent\":\n return TransparentProxy(root_context)\n elif mode == \"reverse\":\n server_tls = self.config.upstream_server.scheme == \"https\"\n return ReverseProxy(\n root_context,\n self.config.upstream_server.address,\n server_tls\n )\n elif mode == \"socks5\":\n return Socks5Proxy(root_context)\n elif mode == \"regular\":\n return HttpProxy(root_context)\n elif callable(mode): # pragma: no cover\n return mode(root_context)\n else: # pragma: no cover\n raise ValueError(\"Unknown proxy mode: %s\" % mode)\n\n def handle(self):\n self.log(\"clientconnect\", \"info\")\n\n root_layer = self._create_root_layer()\n\n try:\n root_layer = self.channel.ask(\"clientconnect\", root_layer)\n root_layer()\n except Kill:\n self.log(\"Connection killed\", \"info\")\n except ProtocolException as e:\n\n if isinstance(e, ClientHandshakeException):\n self.log(\n \"Client Handshake failed. \"\n \"The client may not trust the proxy's certificate for {}.\".format(e.server),\n \"error\"\n )\n self.log(repr(e), \"debug\")\n else:\n self.log(repr(e), \"info\")\n\n self.log(traceback.format_exc(), \"debug\")\n # If an error propagates to the topmost level,\n # we send an HTTP error response, which is both\n # understandable by HTTP clients and humans.\n try:\n error_response = make_error_response(502, repr(e))\n self.client_conn.send(assemble_response(error_response))\n except TcpException:\n pass\n except Exception:\n self.log(traceback.format_exc(), \"error\")\n print(traceback.format_exc(), file=sys.stderr)\n print(\"mitmproxy has crashed!\", file=sys.stderr)\n print(\"Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy\", file=sys.stderr)\n\n self.log(\"clientdisconnect\", \"info\")\n self.channel.tell(\"clientdisconnect\", root_layer)\n self.client_conn.finish()\n\n def log(self, msg, level):\n msg = \"{}: {}\".format(repr(self.client_conn.address), msg)\n self.channel.tell(\"log\", Log(msg, level))\n", "path": "mitmproxy/proxy/server.py"}], "after_files": [{"content": "from __future__ import (absolute_import, print_function, division)\n\nimport traceback\nimport sys\nimport socket\nimport six\n\nfrom netlib import tcp\nfrom netlib.exceptions import TcpException\nfrom netlib.http.http1 import assemble_response\nfrom ..exceptions import ProtocolException, ServerException, ClientHandshakeException, Kill\nfrom ..models import ClientConnection, make_error_response\nfrom .modes import HttpUpstreamProxy, HttpProxy, ReverseProxy, TransparentProxy, Socks5Proxy\nfrom .root_context import RootContext, Log\n\n\nclass DummyServer:\n bound = False\n\n def __init__(self, config):\n self.config = config\n\n def set_channel(self, channel):\n pass\n\n def serve_forever(self):\n pass\n\n def shutdown(self):\n pass\n\n\nclass ProxyServer(tcp.TCPServer):\n allow_reuse_address = True\n bound = True\n\n def __init__(self, config):\n \"\"\"\n Raises ServerException if there's a startup problem.\n \"\"\"\n self.config = config\n try:\n super(ProxyServer, self).__init__((config.host, config.port))\n except socket.error as e:\n six.reraise(\n ServerException,\n ServerException('Error starting proxy server: ' + repr(e)),\n sys.exc_info()[2]\n )\n self.channel = None\n\n def set_channel(self, channel):\n self.channel = channel\n\n def handle_client_connection(self, conn, client_address):\n h = ConnectionHandler(\n conn,\n client_address,\n self.config,\n self.channel\n )\n h.handle()\n\n\nclass ConnectionHandler(object):\n\n def __init__(self, client_conn, client_address, config, channel):\n self.config = config\n 
\"\"\"@type: mitmproxy.proxy.config.ProxyConfig\"\"\"\n self.client_conn = ClientConnection(\n client_conn,\n client_address,\n None)\n \"\"\"@type: mitmproxy.proxy.connection.ClientConnection\"\"\"\n self.channel = channel\n \"\"\"@type: mitmproxy.controller.Channel\"\"\"\n\n def _create_root_layer(self):\n root_context = RootContext(\n self.client_conn,\n self.config,\n self.channel\n )\n\n mode = self.config.mode\n if mode == \"upstream\":\n return HttpUpstreamProxy(\n root_context,\n self.config.upstream_server.address\n )\n elif mode == \"transparent\":\n return TransparentProxy(root_context)\n elif mode == \"reverse\":\n server_tls = self.config.upstream_server.scheme == \"https\"\n return ReverseProxy(\n root_context,\n self.config.upstream_server.address,\n server_tls\n )\n elif mode == \"socks5\":\n return Socks5Proxy(root_context)\n elif mode == \"regular\":\n return HttpProxy(root_context)\n elif callable(mode): # pragma: no cover\n return mode(root_context)\n else: # pragma: no cover\n raise ValueError(\"Unknown proxy mode: %s\" % mode)\n\n def handle(self):\n self.log(\"clientconnect\", \"info\")\n\n root_layer = self._create_root_layer()\n\n try:\n root_layer = self.channel.ask(\"clientconnect\", root_layer)\n root_layer()\n except Kill:\n self.log(\"Connection killed\", \"info\")\n except ProtocolException as e:\n\n if isinstance(e, ClientHandshakeException):\n self.log(\n \"Client Handshake failed. \"\n \"The client may not trust the proxy's certificate for {}.\".format(e.server),\n \"error\"\n )\n self.log(repr(e), \"debug\")\n else:\n self.log(repr(e), \"info\")\n\n self.log(traceback.format_exc(), \"debug\")\n # If an error propagates to the topmost level,\n # we send an HTTP error response, which is both\n # understandable by HTTP clients and humans.\n try:\n error_response = make_error_response(502, repr(e))\n self.client_conn.send(assemble_response(error_response))\n except TcpException:\n pass\n except Exception:\n self.log(traceback.format_exc(), \"error\")\n print(traceback.format_exc(), file=sys.stderr)\n print(\"mitmproxy has crashed!\", file=sys.stderr)\n print(\"Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy\", file=sys.stderr)\n\n self.log(\"clientdisconnect\", \"info\")\n self.channel.tell(\"clientdisconnect\", root_layer)\n self.client_conn.finish()\n\n def log(self, msg, level):\n msg = \"{}: {}\".format(repr(self.client_conn.address), msg)\n self.channel.tell(\"log\", Log(msg, level))\n", "path": "mitmproxy/proxy/server.py"}]} | 1,846 | 99 |
gh_patches_debug_13573 | rasdani/github-patches | git_diff | vyperlang__vyper-891 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disallow int128->int128 conversion.
### What's your issue about?
Disallow`int128` to be converted to `int128`, follows https://github.com/ethereum/vyper/pull/882.
### How can it be fixed?
Fill this in if you know how to fix it.
#### Cute Animal Picture

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/types/convert.py`
Content:
```
1 from vyper.functions.signature import (
2 signature
3 )
4 from vyper.parser.parser_utils import (
5 LLLnode,
6 getpos,
7 byte_array_to_num
8 )
9 from vyper.exceptions import (
10 InvalidLiteralException,
11 TypeMismatchException,
12 )
13 from vyper.types import (
14 BaseType,
15 )
16 from vyper.types import (
17 get_type,
18 )
19 from vyper.utils import (
20 DECIMAL_DIVISOR,
21 MemoryPositions,
22 SizeLimits
23 )
24
25
26 @signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')
27 def to_int128(expr, args, kwargs, context):
28 in_node = args[0]
29 typ, len = get_type(in_node)
30 if typ in ('int128', 'uint256', 'bytes32'):
31 if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):
32 raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr)
33 return LLLnode.from_list(
34 ['clamp', ['mload', MemoryPositions.MINNUM], in_node,
35 ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)
36 )
37 else:
38 return byte_array_to_num(in_node, expr, 'int128')
39
40
41 @signature(('num_literal', 'int128', 'bytes32'), 'str_literal')
42 def to_uint256(expr, args, kwargs, context):
43 in_node = args[0]
44 typ, len = get_type(in_node)
45 if isinstance(in_node, int):
46
47 if not SizeLimits.in_bounds('uint256', in_node):
48 raise InvalidLiteralException("Number out of range: {}".format(in_node))
49 _unit = in_node.typ.unit if typ == 'int128' else None
50 return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))
51 elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):
52 _unit = in_node.typ.unit if typ == 'int128' else None
53 return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))
54 elif isinstance(in_node, LLLnode) and typ in ('bytes32'):
55 return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))
56 else:
57 raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr)
58
59
60 @signature(('int128', 'uint256'), 'str_literal')
61 def to_decimal(expr, args, kwargs, context):
62 input = args[0]
63 if input.typ.typ == 'uint256':
64 return LLLnode.from_list(
65 ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],
66 typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)
67 )
68 else:
69 return LLLnode.from_list(
70 ['mul', input, DECIMAL_DIVISOR],
71 typ=BaseType('decimal', input.typ.unit, input.typ.positional),
72 pos=getpos(expr)
73 )
74
75
76 @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')
77 def to_bytes32(expr, args, kwargs, context):
78 input = args[0]
79 typ, len = get_type(input)
80 if typ == 'bytes':
81 if len != 32:
82 raise TypeMismatchException("Unable to convert bytes[{}] to bytes32".format(len))
83 if input.location == "memory":
84 return LLLnode.from_list(
85 ['mload', ['add', input, 32]], typ=BaseType('bytes32')
86 )
87 elif input.location == "storage":
88 return LLLnode.from_list(
89 ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')
90 )
91 else:
92 return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))
93
94
95 def convert(expr, context):
96 output_type = expr.args[1].s
97 if output_type in conversion_table:
98 return conversion_table[output_type](expr, context)
99 else:
100 raise Exception("Conversion to {} is invalid.".format(output_type))
101
102
103 conversion_table = {
104 'int128': to_int128,
105 'uint256': to_uint256,
106 'decimal': to_decimal,
107 'bytes32': to_bytes32,
108 }
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vyper/types/convert.py b/vyper/types/convert.py
--- a/vyper/types/convert.py
+++ b/vyper/types/convert.py
@@ -23,11 +23,11 @@
)
-@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')
+@signature(('uint256', 'bytes32', 'bytes'), 'str_literal')
def to_int128(expr, args, kwargs, context):
in_node = args[0]
typ, len = get_type(in_node)
- if typ in ('int128', 'uint256', 'bytes32'):
+ if typ in ('uint256', 'bytes32'):
if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):
raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr)
return LLLnode.from_list(
| {"golden_diff": "diff --git a/vyper/types/convert.py b/vyper/types/convert.py\n--- a/vyper/types/convert.py\n+++ b/vyper/types/convert.py\n@@ -23,11 +23,11 @@\n )\n \n \n-@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\n+@signature(('uint256', 'bytes32', 'bytes'), 'str_literal')\n def to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n- if typ in ('int128', 'uint256', 'bytes32'):\n+ if typ in ('uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n", "issue": "Disallow int128->int128 conversion.\n### What's your issue about?\r\n\r\nDisallow`int128` to be converted to `int128`, follows https://github.com/ethereum/vyper/pull/882.\r\n\r\n### How can it be fixed?\r\n\r\nFill this in if you know how to fix it.\r\n\r\n#### Cute Animal Picture\r\n\r\n\n", "before_files": [{"content": "from vyper.functions.signature import (\n signature\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n getpos,\n byte_array_to_num\n)\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.types import (\n BaseType,\n)\nfrom vyper.types import (\n get_type,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n MemoryPositions,\n SizeLimits\n)\n\n\n@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\ndef to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if typ in ('int128', 'uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], in_node,\n ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)\n )\n else:\n return byte_array_to_num(in_node, expr, 'int128')\n\n\n@signature(('num_literal', 'int128', 'bytes32'), 'str_literal')\ndef to_uint256(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if isinstance(in_node, int):\n\n if not SizeLimits.in_bounds('uint256', in_node):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node))\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('bytes32'):\n return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))\n else:\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n\n\n@signature(('int128', 'uint256'), 'str_literal')\ndef to_decimal(expr, args, kwargs, context):\n input = args[0]\n if input.typ.typ == 'uint256':\n return LLLnode.from_list(\n ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)\n )\n else:\n return LLLnode.from_list(\n ['mul', input, DECIMAL_DIVISOR],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature(('int128', 'uint256', 
'address', 'bytes'), 'str_literal')\ndef to_bytes32(expr, args, kwargs, context):\n input = args[0]\n typ, len = get_type(input)\n if typ == 'bytes':\n if len != 32:\n raise TypeMismatchException(\"Unable to convert bytes[{}] to bytes32\".format(len))\n if input.location == \"memory\":\n return LLLnode.from_list(\n ['mload', ['add', input, 32]], typ=BaseType('bytes32')\n )\n elif input.location == \"storage\":\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')\n )\n else:\n return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))\n\n\ndef convert(expr, context):\n output_type = expr.args[1].s\n if output_type in conversion_table:\n return conversion_table[output_type](expr, context)\n else:\n raise Exception(\"Conversion to {} is invalid.\".format(output_type))\n\n\nconversion_table = {\n 'int128': to_int128,\n 'uint256': to_uint256,\n 'decimal': to_decimal,\n 'bytes32': to_bytes32,\n}\n", "path": "vyper/types/convert.py"}], "after_files": [{"content": "from vyper.functions.signature import (\n signature\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n getpos,\n byte_array_to_num\n)\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.types import (\n BaseType,\n)\nfrom vyper.types import (\n get_type,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n MemoryPositions,\n SizeLimits\n)\n\n\n@signature(('uint256', 'bytes32', 'bytes'), 'str_literal')\ndef to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if typ in ('uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], in_node,\n ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)\n )\n else:\n return byte_array_to_num(in_node, expr, 'int128')\n\n\n@signature(('num_literal', 'int128', 'bytes32'), 'str_literal')\ndef to_uint256(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if isinstance(in_node, int):\n\n if not SizeLimits.in_bounds('uint256', in_node):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node))\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('bytes32'):\n return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))\n else:\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n\n\n@signature(('int128', 'uint256'), 'str_literal')\ndef to_decimal(expr, args, kwargs, context):\n input = args[0]\n if input.typ.typ == 'uint256':\n return LLLnode.from_list(\n ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)\n )\n else:\n return LLLnode.from_list(\n ['mul', input, DECIMAL_DIVISOR],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature(('int128', 
'uint256', 'address', 'bytes'), 'str_literal')\ndef to_bytes32(expr, args, kwargs, context):\n input = args[0]\n typ, len = get_type(input)\n if typ == 'bytes':\n if len != 32:\n raise TypeMismatchException(\"Unable to convert bytes[{}] to bytes32\".format(len))\n if input.location == \"memory\":\n return LLLnode.from_list(\n ['mload', ['add', input, 32]], typ=BaseType('bytes32')\n )\n elif input.location == \"storage\":\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')\n )\n else:\n return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))\n\n\ndef convert(expr, context):\n output_type = expr.args[1].s\n if output_type in conversion_table:\n return conversion_table[output_type](expr, context)\n else:\n raise Exception(\"Conversion to {} is invalid.\".format(output_type))\n\n\nconversion_table = {\n 'int128': to_int128,\n 'uint256': to_uint256,\n 'decimal': to_decimal,\n 'bytes32': to_bytes32,\n}\n", "path": "vyper/types/convert.py"}]} | 1,648 | 220 |
gh_patches_debug_9417 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3804 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
boots.py spider doesn't correctly pick up all opticians
The current test in boots.py to switch the brand tags for opticians is `properties["name"].startswith("Opticians - ")`:
https://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/boots.py#L73
But this is not general enough to catch all of them. The displayed name of some opticians branches only start with "Opticians " or "Opticians-". For example https://www.boots.com/stores/3730-tewkesbury-high-street-opticians-gl20-5jz and https://www.boots.com/stores/3947-camden-high-street-opticians-nw1-0lu
I think you could safely change the test to `properties["name"].startswith("Opticians")` but the code a few lines below to strip out the "Opticians" prefix would need to be more complicated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/boots.py`
Content:
```
1 import scrapy
2
3 from locations.items import GeojsonPointItem
4
5
6 class BootsSpider(scrapy.Spider):
7 name = "boots"
8 item_attributes = {"brand": "Boots", "brand_wikidata": "Q6123139"}
9 allowed_domains = ["www.boots.com", "www.boots.ie"]
10 download_delay = 0.5
11 start_urls = ["http://www.boots.com/store-a-z", "http://www.boots.ie/store-a-z"]
12
13 def parse_hours(self, lis):
14 hours = []
15 for li in lis:
16 day = li.xpath(
17 'normalize-space(./td[@class="store_hours_day"]/text())'
18 ).extract_first()
19 times = (
20 li.xpath('normalize-space(./td[@class="store_hours_time"]/text())')
21 .extract_first()
22 .replace(" ", "")
23 .replace("Closed-Closed", "off")
24 )
25 if times and day:
26 hours.append(day[:2] + " " + times)
27
28 return "; ".join(hours)
29
30 def parse_stores(self, response):
31 addr_full = response.xpath(
32 '//section[@class="store_details_content rowContainer"]/dl[@class="store_info_list"][1]/dd[@class="store_info_list_item"]/text()'
33 ).extract()
34 address = ", ".join(map(str.strip, addr_full))
35 # Handle blank store pages e.g. https://www.boots.com/stores/2250-alnwick-paikes-street-ne66-1hx
36 if len(address) == 0:
37 return
38
39 properties = {
40 "ref": response.xpath(
41 'normalize-space(//input[@id="bootsStoreId"]/@value)'
42 ).extract_first(),
43 "name": response.xpath(
44 'normalize-space(//input[@id="inputLocation"][@name="inputLocation"]/@value)'
45 ).extract_first(),
46 "postcode": response.xpath(
47 'normalize-space(//input[@id="storePostcode"]/@value)'
48 ).extract_first(),
49 "addr_full": address,
50 "phone": response.xpath(
51 '//section[@class="store_details_content rowContainer"]/dl[@class="store_info_list"][3]/dd[@class="store_info_list_item"]/a/text()'
52 ).extract_first(),
53 "country": response.xpath(
54 'normalize-space(//input[@id="countryCode"][@name="countryCode"]/@value)'
55 ).extract_first(),
56 "website": response.url,
57 "lat": response.xpath(
58 'normalize-space(//input[@id="lat"]/@value)'
59 ).extract_first(),
60 "lon": response.xpath(
61 'normalize-space(//input[@id="lon"]/@value)'
62 ).extract_first(),
63 }
64
65 hours = self.parse_hours(
66 response.xpath(
67 '//div[@class="row store_all_opening_hours"]/div[1]/table[@class="store_opening_hours "]/tbody/tr'
68 )
69 )
70 if hours:
71 properties["opening_hours"] = hours
72
73 if properties["name"].startswith("Opticians - "):
74 properties["brand"] = "Boots Opticians"
75 properties["brand_wikidata"] = "Q4944037"
76 properties["name"] = properties["name"][12:]
77
78 yield GeojsonPointItem(**properties)
79
80 def parse(self, response):
81 urls = response.xpath(
82 '//div[@class="brand_list_viewer"]/div[@class="column"]/ul/li/a/@href'
83 ).extract()
84 for path in urls:
85 yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/boots.py b/locations/spiders/boots.py
--- a/locations/spiders/boots.py
+++ b/locations/spiders/boots.py
@@ -70,10 +70,10 @@
if hours:
properties["opening_hours"] = hours
- if properties["name"].startswith("Opticians - "):
+ if properties["name"].startswith("Opticians"):
properties["brand"] = "Boots Opticians"
properties["brand_wikidata"] = "Q4944037"
- properties["name"] = properties["name"][12:]
+ properties["name"] = properties["name"].replace("Opticians", "").strip("- ")
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/boots.py b/locations/spiders/boots.py\n--- a/locations/spiders/boots.py\n+++ b/locations/spiders/boots.py\n@@ -70,10 +70,10 @@\n if hours:\n properties[\"opening_hours\"] = hours\n \n- if properties[\"name\"].startswith(\"Opticians - \"):\n+ if properties[\"name\"].startswith(\"Opticians\"):\n properties[\"brand\"] = \"Boots Opticians\"\n properties[\"brand_wikidata\"] = \"Q4944037\"\n- properties[\"name\"] = properties[\"name\"][12:]\n+ properties[\"name\"] = properties[\"name\"].replace(\"Opticians\", \"\").strip(\"- \")\n \n yield GeojsonPointItem(**properties)\n", "issue": "boots.py spider doesn't correctly pick up all opticians\nThe current test in boots.py to switch the brand tags for opticians is `properties[\"name\"].startswith(\"Opticians - \")`:\r\nhttps://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/boots.py#L73\r\n\r\nBut this is not general enough to catch all of them. The displayed name of some opticians branches only start with \"Opticians \" or \"Opticians-\". For example https://www.boots.com/stores/3730-tewkesbury-high-street-opticians-gl20-5jz and https://www.boots.com/stores/3947-camden-high-street-opticians-nw1-0lu\r\n\r\nI think you could safely change the test to `properties[\"name\"].startswith(\"Opticians\")` but the code a few lines below to strip out the \"Opticians\" prefix would need to be more complicated.\n", "before_files": [{"content": "import scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass BootsSpider(scrapy.Spider):\n name = \"boots\"\n item_attributes = {\"brand\": \"Boots\", \"brand_wikidata\": \"Q6123139\"}\n allowed_domains = [\"www.boots.com\", \"www.boots.ie\"]\n download_delay = 0.5\n start_urls = [\"http://www.boots.com/store-a-z\", \"http://www.boots.ie/store-a-z\"]\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath(\n 'normalize-space(./td[@class=\"store_hours_day\"]/text())'\n ).extract_first()\n times = (\n li.xpath('normalize-space(./td[@class=\"store_hours_time\"]/text())')\n .extract_first()\n .replace(\" \", \"\")\n .replace(\"Closed-Closed\", \"off\")\n )\n if times and day:\n hours.append(day[:2] + \" \" + times)\n\n return \"; \".join(hours)\n\n def parse_stores(self, response):\n addr_full = response.xpath(\n '//section[@class=\"store_details_content rowContainer\"]/dl[@class=\"store_info_list\"][1]/dd[@class=\"store_info_list_item\"]/text()'\n ).extract()\n address = \", \".join(map(str.strip, addr_full))\n # Handle blank store pages e.g. 
https://www.boots.com/stores/2250-alnwick-paikes-street-ne66-1hx\n if len(address) == 0:\n return\n\n properties = {\n \"ref\": response.xpath(\n 'normalize-space(//input[@id=\"bootsStoreId\"]/@value)'\n ).extract_first(),\n \"name\": response.xpath(\n 'normalize-space(//input[@id=\"inputLocation\"][@name=\"inputLocation\"]/@value)'\n ).extract_first(),\n \"postcode\": response.xpath(\n 'normalize-space(//input[@id=\"storePostcode\"]/@value)'\n ).extract_first(),\n \"addr_full\": address,\n \"phone\": response.xpath(\n '//section[@class=\"store_details_content rowContainer\"]/dl[@class=\"store_info_list\"][3]/dd[@class=\"store_info_list_item\"]/a/text()'\n ).extract_first(),\n \"country\": response.xpath(\n 'normalize-space(//input[@id=\"countryCode\"][@name=\"countryCode\"]/@value)'\n ).extract_first(),\n \"website\": response.url,\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"lat\"]/@value)'\n ).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"lon\"]/@value)'\n ).extract_first(),\n }\n\n hours = self.parse_hours(\n response.xpath(\n '//div[@class=\"row store_all_opening_hours\"]/div[1]/table[@class=\"store_opening_hours \"]/tbody/tr'\n )\n )\n if hours:\n properties[\"opening_hours\"] = hours\n\n if properties[\"name\"].startswith(\"Opticians - \"):\n properties[\"brand\"] = \"Boots Opticians\"\n properties[\"brand_wikidata\"] = \"Q4944037\"\n properties[\"name\"] = properties[\"name\"][12:]\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath(\n '//div[@class=\"brand_list_viewer\"]/div[@class=\"column\"]/ul/li/a/@href'\n ).extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n", "path": "locations/spiders/boots.py"}], "after_files": [{"content": "import scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass BootsSpider(scrapy.Spider):\n name = \"boots\"\n item_attributes = {\"brand\": \"Boots\", \"brand_wikidata\": \"Q6123139\"}\n allowed_domains = [\"www.boots.com\", \"www.boots.ie\"]\n download_delay = 0.5\n start_urls = [\"http://www.boots.com/store-a-z\", \"http://www.boots.ie/store-a-z\"]\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath(\n 'normalize-space(./td[@class=\"store_hours_day\"]/text())'\n ).extract_first()\n times = (\n li.xpath('normalize-space(./td[@class=\"store_hours_time\"]/text())')\n .extract_first()\n .replace(\" \", \"\")\n .replace(\"Closed-Closed\", \"off\")\n )\n if times and day:\n hours.append(day[:2] + \" \" + times)\n\n return \"; \".join(hours)\n\n def parse_stores(self, response):\n addr_full = response.xpath(\n '//section[@class=\"store_details_content rowContainer\"]/dl[@class=\"store_info_list\"][1]/dd[@class=\"store_info_list_item\"]/text()'\n ).extract()\n address = \", \".join(map(str.strip, addr_full))\n # Handle blank store pages e.g. 
https://www.boots.com/stores/2250-alnwick-paikes-street-ne66-1hx\n if len(address) == 0:\n return\n\n properties = {\n \"ref\": response.xpath(\n 'normalize-space(//input[@id=\"bootsStoreId\"]/@value)'\n ).extract_first(),\n \"name\": response.xpath(\n 'normalize-space(//input[@id=\"inputLocation\"][@name=\"inputLocation\"]/@value)'\n ).extract_first(),\n \"postcode\": response.xpath(\n 'normalize-space(//input[@id=\"storePostcode\"]/@value)'\n ).extract_first(),\n \"addr_full\": address,\n \"phone\": response.xpath(\n '//section[@class=\"store_details_content rowContainer\"]/dl[@class=\"store_info_list\"][3]/dd[@class=\"store_info_list_item\"]/a/text()'\n ).extract_first(),\n \"country\": response.xpath(\n 'normalize-space(//input[@id=\"countryCode\"][@name=\"countryCode\"]/@value)'\n ).extract_first(),\n \"website\": response.url,\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"lat\"]/@value)'\n ).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"lon\"]/@value)'\n ).extract_first(),\n }\n\n hours = self.parse_hours(\n response.xpath(\n '//div[@class=\"row store_all_opening_hours\"]/div[1]/table[@class=\"store_opening_hours \"]/tbody/tr'\n )\n )\n if hours:\n properties[\"opening_hours\"] = hours\n\n if properties[\"name\"].startswith(\"Opticians\"):\n properties[\"brand\"] = \"Boots Opticians\"\n properties[\"brand_wikidata\"] = \"Q4944037\"\n properties[\"name\"] = properties[\"name\"].replace(\"Opticians\", \"\").strip(\"- \")\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath(\n '//div[@class=\"brand_list_viewer\"]/div[@class=\"column\"]/ul/li/a/@href'\n ).extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n", "path": "locations/spiders/boots.py"}]} | 1,409 | 172 |
gh_patches_debug_384 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1811 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
List comprehension in __all__ prevents Pylance from working
Thanks for contributing to Pwntools! Ideas from the community help make Pwntools an amazing tool for everybody.
If you've got an idea for a new feature, please provide information about:
* What the feature does
According to https://github.com/microsoft/pylance-release/issues/289, the list comprehension in `__all__` in https://github.com/Gallopsled/pwntools/blob/4e6ccb0da17fb91e43a4f9e95edf4fd83806ba23/pwn/toplevel.py#L85 prevents [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) from working (when using `from pwn import *` instead of manually importing all modules).
https://github.com/compas-dev/compas/issues/621 may be a solution instead of listing all attributes manually to fix that.
* Why the feature should exist
To make Pylance happy :smile:
* What tests should be included
Test in VS Code to ensure it works.
If you think you can write the feature yourself, please submit a Pull Request and we can review your changes!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwn/toplevel.py`
Content:
```
1 # Get all the modules from pwnlib
2 import collections
3 import logging
4 import math
5 import operator
6 import os
7 import platform
8 import re
9 import requests
10 import socks
11 import signal
12 import string
13 import struct
14 import subprocess
15 import sys
16 import tempfile
17 import threading
18 import time
19
20 from pprint import pprint
21
22 import pwnlib
23 from pwnlib import *
24 from pwnlib.asm import *
25 from pwnlib.context import Thread
26 from pwnlib.context import context, LocalContext
27 from pwnlib.dynelf import DynELF
28 from pwnlib.encoders import *
29 from pwnlib.elf.corefile import Core, Corefile, Coredump
30 from pwnlib.elf.elf import ELF, load
31 from pwnlib.encoders import *
32 from pwnlib.exception import PwnlibException
33 from pwnlib.gdb import attach, debug_assembly, debug_shellcode
34 from pwnlib.filepointer import *
35 from pwnlib.flag import *
36 from pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split
37 from pwnlib.log import getLogger
38 from pwnlib.memleak import MemLeak, RelativeMemLeak
39 from pwnlib.regsort import *
40 from pwnlib.replacements import *
41 from pwnlib.rop import ROP
42 from pwnlib.rop.call import AppendedArgument
43 from pwnlib.rop.srop import SigreturnFrame
44 from pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload
45 from pwnlib.runner import *
46 from pwnlib.term.readline import str_input
47 from pwnlib.timeout import Timeout
48 from pwnlib.tubes.listen import listen
49 from pwnlib.tubes.process import process, PTY, PIPE, STDOUT
50 from pwnlib.tubes.remote import remote, tcp, udp, connect
51 from pwnlib.tubes.serialtube import serialtube
52 from pwnlib.tubes.server import server
53 from pwnlib.tubes.ssh import ssh
54 from pwnlib.tubes.tube import tube
55 from pwnlib.ui import *
56 from pwnlib.util import crc
57 from pwnlib.util import iters
58 from pwnlib.util import net
59 from pwnlib.util import proc
60 from pwnlib.util import safeeval
61 from pwnlib.util.crc import BitPolynom
62 from pwnlib.util.cyclic import *
63 from pwnlib.util.fiddling import *
64 from pwnlib.util.getdents import *
65 from pwnlib.util.hashes import *
66 from pwnlib.util.lists import *
67 from pwnlib.util.misc import *
68 from pwnlib.util.packing import *
69 from pwnlib.util.proc import pidof
70 from pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with
71 from pwnlib.util.splash import *
72 from pwnlib.util.web import *
73
74 # Promote these modules, so that "from pwn import *" will let you access them
75
76 from six.moves import cPickle as pickle, cStringIO as StringIO
77 from six import BytesIO
78
79 error = log.error
80 warning = log.warning
81 warn = log.warning
82 info = log.info
83 debug = log.debug
84 success = log.success
85
86 __all__ = [x for x in tuple(globals()) if x != '__name__']
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwn/toplevel.py b/pwn/toplevel.py
--- a/pwn/toplevel.py
+++ b/pwn/toplevel.py
@@ -83,4 +83,5 @@
debug = log.debug
success = log.success
-__all__ = [x for x in tuple(globals()) if x != '__name__']
+# Equivalence with the default behavior of "from import *"
+# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]
| {"golden_diff": "diff --git a/pwn/toplevel.py b/pwn/toplevel.py\n--- a/pwn/toplevel.py\n+++ b/pwn/toplevel.py\n@@ -83,4 +83,5 @@\n debug = log.debug\n success = log.success\n \n-__all__ = [x for x in tuple(globals()) if x != '__name__']\n+# Equivalence with the default behavior of \"from import *\"\n+# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "issue": "List comprehension in __all__ prevents Pylance from working\nThanks for contributing to Pwntools! Ideas from the community help make Pwntools an amazing tool for everybody.\r\n\r\nIf you've got an idea for a new feature, please provide information about:\r\n\r\n* What the feature does\r\nAccording to https://github.com/microsoft/pylance-release/issues/289, the list comprehension in `__all__` in https://github.com/Gallopsled/pwntools/blob/4e6ccb0da17fb91e43a4f9e95edf4fd83806ba23/pwn/toplevel.py#L85 prevents [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) from working (when using `from pwn import *` instead of manually importing all modules).\r\nhttps://github.com/compas-dev/compas/issues/621 may be a solution instead of listing all attributes manually to fix that.\r\n* Why the feature should exist\r\nTo make Pylance happy :smile: \r\n* What tests should be included\r\nTest in VS Code to ensure it works.\r\n\r\nIf you think you can write the feature yourself, please submit a Pull Request and we can review your changes!\r\n\n", "before_files": [{"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport requests\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom 
pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\n__all__ = [x for x in tuple(globals()) if x != '__name__']\n", "path": "pwn/toplevel.py"}], "after_files": [{"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport requests\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\n# Equivalence with the default behavior of \"from import *\"\n# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "path": "pwn/toplevel.py"}]} | 1,403 | 109 |
gh_patches_debug_14937 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-4918 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nv-sd CI test failure
The Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7434747598 failed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepspeed/module_inject/containers/vae.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 from ..policy import DSPolicy
7 from ...model_implementations.diffusers.vae import DSVAE
8
9
10 class VAEPolicy(DSPolicy):
11
12 def __init__(self):
13 super().__init__()
14 try:
15 import diffusers
16 if hasattr(diffusers.models.vae, "AutoencoderKL"):
17 self._orig_layer_class = diffusers.models.vae.AutoencoderKL
18 else:
19 # Diffusers >= 0.12.0 changes location of AutoencoderKL
20 self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL
21 except ImportError:
22 self._orig_layer_class = None
23
24 def match(self, module):
25 return isinstance(module, self._orig_layer_class)
26
27 def match_replaced(self, module):
28 return isinstance(module, DSVAE)
29
30 def apply(self, module, enable_cuda_graph=True):
31 # TODO(cmikeh2): Enable cuda graph should be an inference configuration
32 return DSVAE(module, enable_cuda_graph=enable_cuda_graph)
33
34 # NOTE (lekurile): Should we have a diffusers policy class?
35 def attention(self, client_module):
36 pass
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepspeed/module_inject/containers/vae.py b/deepspeed/module_inject/containers/vae.py
--- a/deepspeed/module_inject/containers/vae.py
+++ b/deepspeed/module_inject/containers/vae.py
@@ -13,11 +13,11 @@
super().__init__()
try:
import diffusers
- if hasattr(diffusers.models.vae, "AutoencoderKL"):
- self._orig_layer_class = diffusers.models.vae.AutoencoderKL
+ if hasattr(diffusers.models.autoencoders.vae, "AutoencoderKL"):
+ self._orig_layer_class = diffusers.models.autoencoders.vae.AutoencoderKL
else:
# Diffusers >= 0.12.0 changes location of AutoencoderKL
- self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL
+ self._orig_layer_class = diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL
except ImportError:
self._orig_layer_class = None
| {"golden_diff": "diff --git a/deepspeed/module_inject/containers/vae.py b/deepspeed/module_inject/containers/vae.py\n--- a/deepspeed/module_inject/containers/vae.py\n+++ b/deepspeed/module_inject/containers/vae.py\n@@ -13,11 +13,11 @@\n super().__init__()\n try:\n import diffusers\n- if hasattr(diffusers.models.vae, \"AutoencoderKL\"):\n- self._orig_layer_class = diffusers.models.vae.AutoencoderKL\n+ if hasattr(diffusers.models.autoencoders.vae, \"AutoencoderKL\"):\n+ self._orig_layer_class = diffusers.models.autoencoders.vae.AutoencoderKL\n else:\n # Diffusers >= 0.12.0 changes location of AutoencoderKL\n- self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL\n+ self._orig_layer_class = diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL\n except ImportError:\n self._orig_layer_class = None\n", "issue": "nv-sd CI test failure\nThe Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7434747598 failed.\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom ..policy import DSPolicy\nfrom ...model_implementations.diffusers.vae import DSVAE\n\n\nclass VAEPolicy(DSPolicy):\n\n def __init__(self):\n super().__init__()\n try:\n import diffusers\n if hasattr(diffusers.models.vae, \"AutoencoderKL\"):\n self._orig_layer_class = diffusers.models.vae.AutoencoderKL\n else:\n # Diffusers >= 0.12.0 changes location of AutoencoderKL\n self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL\n except ImportError:\n self._orig_layer_class = None\n\n def match(self, module):\n return isinstance(module, self._orig_layer_class)\n\n def match_replaced(self, module):\n return isinstance(module, DSVAE)\n\n def apply(self, module, enable_cuda_graph=True):\n # TODO(cmikeh2): Enable cuda graph should be an inference configuration\n return DSVAE(module, enable_cuda_graph=enable_cuda_graph)\n\n # NOTE (lekurile): Should we have a diffusers policy class?\n def attention(self, client_module):\n pass\n", "path": "deepspeed/module_inject/containers/vae.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom ..policy import DSPolicy\nfrom ...model_implementations.diffusers.vae import DSVAE\n\n\nclass VAEPolicy(DSPolicy):\n\n def __init__(self):\n super().__init__()\n try:\n import diffusers\n if hasattr(diffusers.models.autoencoders.vae, \"AutoencoderKL\"):\n self._orig_layer_class = diffusers.models.autoencoders.vae.AutoencoderKL\n else:\n # Diffusers >= 0.12.0 changes location of AutoencoderKL\n self._orig_layer_class = diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL\n except ImportError:\n self._orig_layer_class = None\n\n def match(self, module):\n return isinstance(module, self._orig_layer_class)\n\n def match_replaced(self, module):\n return isinstance(module, DSVAE)\n\n def apply(self, module, enable_cuda_graph=True):\n # TODO(cmikeh2): Enable cuda graph should be an inference configuration\n return DSVAE(module, enable_cuda_graph=enable_cuda_graph)\n\n # NOTE (lekurile): Should we have a diffusers policy class?\n def attention(self, client_module):\n pass\n", "path": "deepspeed/module_inject/containers/vae.py"}]} | 641 | 229 |
gh_patches_debug_37365 | rasdani/github-patches | git_diff | pantsbuild__pants-13583 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scala import extraction for inference
Inference for Scala will require (at a minimum) import extraction from Scala sources. In v1 this was accomplished with https://scalameta.org/, which still seems to be active.
https://scalameta.org/docs/trees/guide.html#parse-trees
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/scala/dependency_inference/rules.py`
Content:
```
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 from __future__ import annotations
4
5 import logging
6
7 from pants.backend.scala.dependency_inference import scala_parser, symbol_mapper
8 from pants.backend.scala.dependency_inference.scala_parser import ScalaSourceDependencyAnalysis
9 from pants.backend.scala.subsystems.scala_infer import ScalaInferSubsystem
10 from pants.backend.scala.target_types import ScalaSourceField
11 from pants.build_graph.address import Address
12 from pants.core.util_rules.source_files import SourceFilesRequest
13 from pants.engine.internals.selectors import Get, MultiGet
14 from pants.engine.rules import collect_rules, rule
15 from pants.engine.target import (
16 Dependencies,
17 DependenciesRequest,
18 ExplicitlyProvidedDependencies,
19 InferDependenciesRequest,
20 InferredDependencies,
21 WrappedTarget,
22 )
23 from pants.engine.unions import UnionRule
24 from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping
25 from pants.util.ordered_set import OrderedSet
26
27 logger = logging.getLogger(__name__)
28
29
30 class InferScalaSourceDependencies(InferDependenciesRequest):
31 infer_from = ScalaSourceField
32
33
34 @rule(desc="Inferring Scala dependencies by analyzing sources")
35 async def infer_scala_dependencies_via_source_analysis(
36 request: InferScalaSourceDependencies,
37 scala_infer_subsystem: ScalaInferSubsystem,
38 first_party_symbol_map: FirstPartySymbolMapping,
39 ) -> InferredDependencies:
40 if not scala_infer_subsystem.imports:
41 return InferredDependencies([])
42
43 address = request.sources_field.address
44 wrapped_tgt = await Get(WrappedTarget, Address, address)
45 explicitly_provided_deps, analysis = await MultiGet(
46 Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),
47 Get(ScalaSourceDependencyAnalysis, SourceFilesRequest([request.sources_field])),
48 )
49
50 symbols: OrderedSet[str] = OrderedSet()
51 if scala_infer_subsystem.imports:
52 symbols.update(analysis.all_imports())
53
54 dependencies: OrderedSet[Address] = OrderedSet()
55 for symbol in symbols:
56 matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)
57 if not matches:
58 continue
59
60 explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
61 matches,
62 address,
63 import_reference="type",
64 context=f"The target {address} imports `{symbol}`",
65 )
66
67 maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)
68 if maybe_disambiguated:
69 dependencies.add(maybe_disambiguated)
70
71 return InferredDependencies(dependencies)
72
73
74 def rules():
75 return [
76 *collect_rules(),
77 *scala_parser.rules(),
78 *symbol_mapper.rules(),
79 UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),
80 ]
81
```
Path: `src/python/pants/backend/java/subsystems/java_infer.py`
Content:
```
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 from typing import cast
4
5 from pants.option.subsystem import Subsystem
6 from pants.util.docutil import git_url
7
8
9 class JavaInferSubsystem(Subsystem):
10 options_scope = "java-infer"
11 help = "Options controlling which dependencies will be inferred for Java targets."
12
13 @classmethod
14 def register_options(cls, register):
15 super().register_options(register)
16 register(
17 "--imports",
18 default=True,
19 type=bool,
20 help=("Infer a target's dependencies by parsing import statements from sources."),
21 )
22 register(
23 "--consumed-types",
24 default=True,
25 type=bool,
26 help=("Infer a target's dependencies by parsing consumed types from sources."),
27 )
28 register(
29 "--third-party-imports",
30 default=True,
31 type=bool,
32 help="Infer a target's third-party dependencies using Java import statements.",
33 )
34 _default_package_mapping_url = git_url(
35 "src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py"
36 )
37 register(
38 "--third-party-import-mapping",
39 type=dict,
40 help=(
41 "A dictionary mapping a Java package path to a JVM artifact coordinate (GROUP:ARTIFACT) "
42 "without the version. The package path may be made recursive to match symbols in subpackages "
43 "by adding `.**` to the end of the package path. For example, specify `{'org.junit.**': 'junit:junit'} `"
44 "to infer a dependency on junit:junit for any file importing a symbol from org.junit or its "
45 f"subpackages. Pants also supplies a default package mapping ({_default_package_mapping_url})."
46 ),
47 )
48
49 @property
50 def imports(self) -> bool:
51 return cast(bool, self.options.imports)
52
53 @property
54 def consumed_types(self) -> bool:
55 return cast(bool, self.options.consumed_types)
56
57 @property
58 def third_party_imports(self) -> bool:
59 return cast(bool, self.options.third_party_imports)
60
61 @property
62 def third_party_import_mapping(self) -> dict:
63 return cast(dict, self.options.third_party_import_mapping)
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/java/subsystems/java_infer.py b/src/python/pants/backend/java/subsystems/java_infer.py
--- a/src/python/pants/backend/java/subsystems/java_infer.py
+++ b/src/python/pants/backend/java/subsystems/java_infer.py
@@ -34,6 +34,7 @@
_default_package_mapping_url = git_url(
"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py"
)
+ # TODO: Move to `coursier` or a generic `jvm` subsystem.
register(
"--third-party-import-mapping",
type=dict,
diff --git a/src/python/pants/backend/scala/dependency_inference/rules.py b/src/python/pants/backend/scala/dependency_inference/rules.py
--- a/src/python/pants/backend/scala/dependency_inference/rules.py
+++ b/src/python/pants/backend/scala/dependency_inference/rules.py
@@ -21,6 +21,12 @@
WrappedTarget,
)
from pants.engine.unions import UnionRule
+from pants.jvm.dependency_inference import artifact_mapper
+from pants.jvm.dependency_inference.artifact_mapper import (
+ AvailableThirdPartyArtifacts,
+ ThirdPartyPackageToArtifactMapping,
+ find_artifact_mapping,
+)
from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping
from pants.util.ordered_set import OrderedSet
@@ -36,6 +42,8 @@
request: InferScalaSourceDependencies,
scala_infer_subsystem: ScalaInferSubsystem,
first_party_symbol_map: FirstPartySymbolMapping,
+ third_party_artifact_mapping: ThirdPartyPackageToArtifactMapping,
+ available_artifacts: AvailableThirdPartyArtifacts,
) -> InferredDependencies:
if not scala_infer_subsystem.imports:
return InferredDependencies([])
@@ -53,7 +61,11 @@
dependencies: OrderedSet[Address] = OrderedSet()
for symbol in symbols:
- matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)
+ first_party_matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)
+ third_party_matches = find_artifact_mapping(
+ symbol, third_party_artifact_mapping, available_artifacts
+ )
+ matches = first_party_matches.union(third_party_matches)
if not matches:
continue
@@ -74,6 +86,7 @@
def rules():
return [
*collect_rules(),
+ *artifact_mapper.rules(),
*scala_parser.rules(),
*symbol_mapper.rules(),
UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),
| {"golden_diff": "diff --git a/src/python/pants/backend/java/subsystems/java_infer.py b/src/python/pants/backend/java/subsystems/java_infer.py\n--- a/src/python/pants/backend/java/subsystems/java_infer.py\n+++ b/src/python/pants/backend/java/subsystems/java_infer.py\n@@ -34,6 +34,7 @@\n _default_package_mapping_url = git_url(\n \"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py\"\n )\n+ # TODO: Move to `coursier` or a generic `jvm` subsystem.\n register(\n \"--third-party-import-mapping\",\n type=dict,\ndiff --git a/src/python/pants/backend/scala/dependency_inference/rules.py b/src/python/pants/backend/scala/dependency_inference/rules.py\n--- a/src/python/pants/backend/scala/dependency_inference/rules.py\n+++ b/src/python/pants/backend/scala/dependency_inference/rules.py\n@@ -21,6 +21,12 @@\n WrappedTarget,\n )\n from pants.engine.unions import UnionRule\n+from pants.jvm.dependency_inference import artifact_mapper\n+from pants.jvm.dependency_inference.artifact_mapper import (\n+ AvailableThirdPartyArtifacts,\n+ ThirdPartyPackageToArtifactMapping,\n+ find_artifact_mapping,\n+)\n from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping\n from pants.util.ordered_set import OrderedSet\n \n@@ -36,6 +42,8 @@\n request: InferScalaSourceDependencies,\n scala_infer_subsystem: ScalaInferSubsystem,\n first_party_symbol_map: FirstPartySymbolMapping,\n+ third_party_artifact_mapping: ThirdPartyPackageToArtifactMapping,\n+ available_artifacts: AvailableThirdPartyArtifacts,\n ) -> InferredDependencies:\n if not scala_infer_subsystem.imports:\n return InferredDependencies([])\n@@ -53,7 +61,11 @@\n \n dependencies: OrderedSet[Address] = OrderedSet()\n for symbol in symbols:\n- matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n+ first_party_matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n+ third_party_matches = find_artifact_mapping(\n+ symbol, third_party_artifact_mapping, available_artifacts\n+ )\n+ matches = first_party_matches.union(third_party_matches)\n if not matches:\n continue\n \n@@ -74,6 +86,7 @@\n def rules():\n return [\n *collect_rules(),\n+ *artifact_mapper.rules(),\n *scala_parser.rules(),\n *symbol_mapper.rules(),\n UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),\n", "issue": "Scala import extraction for inference\nInference for Scala will require (at a minimum) import extraction from Scala sources. 
In v1 this was accomplished with https://scalameta.org/, which still seems to be active.\r\n\r\nhttps://scalameta.org/docs/trees/guide.html#parse-trees\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import annotations\n\nimport logging\n\nfrom pants.backend.scala.dependency_inference import scala_parser, symbol_mapper\nfrom pants.backend.scala.dependency_inference.scala_parser import ScalaSourceDependencyAnalysis\nfrom pants.backend.scala.subsystems.scala_infer import ScalaInferSubsystem\nfrom pants.backend.scala.target_types import ScalaSourceField\nfrom pants.build_graph.address import Address\nfrom pants.core.util_rules.source_files import SourceFilesRequest\nfrom pants.engine.internals.selectors import Get, MultiGet\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.engine.target import (\n Dependencies,\n DependenciesRequest,\n ExplicitlyProvidedDependencies,\n InferDependenciesRequest,\n InferredDependencies,\n WrappedTarget,\n)\nfrom pants.engine.unions import UnionRule\nfrom pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping\nfrom pants.util.ordered_set import OrderedSet\n\nlogger = logging.getLogger(__name__)\n\n\nclass InferScalaSourceDependencies(InferDependenciesRequest):\n infer_from = ScalaSourceField\n\n\n@rule(desc=\"Inferring Scala dependencies by analyzing sources\")\nasync def infer_scala_dependencies_via_source_analysis(\n request: InferScalaSourceDependencies,\n scala_infer_subsystem: ScalaInferSubsystem,\n first_party_symbol_map: FirstPartySymbolMapping,\n) -> InferredDependencies:\n if not scala_infer_subsystem.imports:\n return InferredDependencies([])\n\n address = request.sources_field.address\n wrapped_tgt = await Get(WrappedTarget, Address, address)\n explicitly_provided_deps, analysis = await MultiGet(\n Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),\n Get(ScalaSourceDependencyAnalysis, SourceFilesRequest([request.sources_field])),\n )\n\n symbols: OrderedSet[str] = OrderedSet()\n if scala_infer_subsystem.imports:\n symbols.update(analysis.all_imports())\n\n dependencies: OrderedSet[Address] = OrderedSet()\n for symbol in symbols:\n matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n if not matches:\n continue\n\n explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(\n matches,\n address,\n import_reference=\"type\",\n context=f\"The target {address} imports `{symbol}`\",\n )\n\n maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)\n if maybe_disambiguated:\n dependencies.add(maybe_disambiguated)\n\n return InferredDependencies(dependencies)\n\n\ndef rules():\n return [\n *collect_rules(),\n *scala_parser.rules(),\n *symbol_mapper.rules(),\n UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),\n ]\n", "path": "src/python/pants/backend/scala/dependency_inference/rules.py"}, {"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom typing import cast\n\nfrom pants.option.subsystem import Subsystem\nfrom pants.util.docutil import git_url\n\n\nclass JavaInferSubsystem(Subsystem):\n options_scope = \"java-infer\"\n help = \"Options controlling which dependencies will be inferred for Java targets.\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n 
register(\n \"--imports\",\n default=True,\n type=bool,\n help=(\"Infer a target's dependencies by parsing import statements from sources.\"),\n )\n register(\n \"--consumed-types\",\n default=True,\n type=bool,\n help=(\"Infer a target's dependencies by parsing consumed types from sources.\"),\n )\n register(\n \"--third-party-imports\",\n default=True,\n type=bool,\n help=\"Infer a target's third-party dependencies using Java import statements.\",\n )\n _default_package_mapping_url = git_url(\n \"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py\"\n )\n register(\n \"--third-party-import-mapping\",\n type=dict,\n help=(\n \"A dictionary mapping a Java package path to a JVM artifact coordinate (GROUP:ARTIFACT) \"\n \"without the version. The package path may be made recursive to match symbols in subpackages \"\n \"by adding `.**` to the end of the package path. For example, specify `{'org.junit.**': 'junit:junit'} `\"\n \"to infer a dependency on junit:junit for any file importing a symbol from org.junit or its \"\n f\"subpackages. Pants also supplies a default package mapping ({_default_package_mapping_url}).\"\n ),\n )\n\n @property\n def imports(self) -> bool:\n return cast(bool, self.options.imports)\n\n @property\n def consumed_types(self) -> bool:\n return cast(bool, self.options.consumed_types)\n\n @property\n def third_party_imports(self) -> bool:\n return cast(bool, self.options.third_party_imports)\n\n @property\n def third_party_import_mapping(self) -> dict:\n return cast(dict, self.options.third_party_import_mapping)\n", "path": "src/python/pants/backend/java/subsystems/java_infer.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import annotations\n\nimport logging\n\nfrom pants.backend.scala.dependency_inference import scala_parser, symbol_mapper\nfrom pants.backend.scala.dependency_inference.scala_parser import ScalaSourceDependencyAnalysis\nfrom pants.backend.scala.subsystems.scala_infer import ScalaInferSubsystem\nfrom pants.backend.scala.target_types import ScalaSourceField\nfrom pants.build_graph.address import Address\nfrom pants.core.util_rules.source_files import SourceFilesRequest\nfrom pants.engine.internals.selectors import Get, MultiGet\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.engine.target import (\n Dependencies,\n DependenciesRequest,\n ExplicitlyProvidedDependencies,\n InferDependenciesRequest,\n InferredDependencies,\n WrappedTarget,\n)\nfrom pants.engine.unions import UnionRule\nfrom pants.jvm.dependency_inference import artifact_mapper\nfrom pants.jvm.dependency_inference.artifact_mapper import (\n AvailableThirdPartyArtifacts,\n ThirdPartyPackageToArtifactMapping,\n find_artifact_mapping,\n)\nfrom pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping\nfrom pants.util.ordered_set import OrderedSet\n\nlogger = logging.getLogger(__name__)\n\n\nclass InferScalaSourceDependencies(InferDependenciesRequest):\n infer_from = ScalaSourceField\n\n\n@rule(desc=\"Inferring Scala dependencies by analyzing sources\")\nasync def infer_scala_dependencies_via_source_analysis(\n request: InferScalaSourceDependencies,\n scala_infer_subsystem: ScalaInferSubsystem,\n first_party_symbol_map: FirstPartySymbolMapping,\n third_party_artifact_mapping: ThirdPartyPackageToArtifactMapping,\n available_artifacts: AvailableThirdPartyArtifacts,\n) -> InferredDependencies:\n if not 
scala_infer_subsystem.imports:\n return InferredDependencies([])\n\n address = request.sources_field.address\n wrapped_tgt = await Get(WrappedTarget, Address, address)\n explicitly_provided_deps, analysis = await MultiGet(\n Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),\n Get(ScalaSourceDependencyAnalysis, SourceFilesRequest([request.sources_field])),\n )\n\n symbols: OrderedSet[str] = OrderedSet()\n if scala_infer_subsystem.imports:\n symbols.update(analysis.all_imports())\n\n dependencies: OrderedSet[Address] = OrderedSet()\n for symbol in symbols:\n first_party_matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n third_party_matches = find_artifact_mapping(\n symbol, third_party_artifact_mapping, available_artifacts\n )\n matches = first_party_matches.union(third_party_matches)\n if not matches:\n continue\n\n explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(\n matches,\n address,\n import_reference=\"type\",\n context=f\"The target {address} imports `{symbol}`\",\n )\n\n maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)\n if maybe_disambiguated:\n dependencies.add(maybe_disambiguated)\n\n return InferredDependencies(dependencies)\n\n\ndef rules():\n return [\n *collect_rules(),\n *artifact_mapper.rules(),\n *scala_parser.rules(),\n *symbol_mapper.rules(),\n UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),\n ]\n", "path": "src/python/pants/backend/scala/dependency_inference/rules.py"}, {"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom typing import cast\n\nfrom pants.option.subsystem import Subsystem\nfrom pants.util.docutil import git_url\n\n\nclass JavaInferSubsystem(Subsystem):\n options_scope = \"java-infer\"\n help = \"Options controlling which dependencies will be inferred for Java targets.\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n register(\n \"--imports\",\n default=True,\n type=bool,\n help=(\"Infer a target's dependencies by parsing import statements from sources.\"),\n )\n register(\n \"--consumed-types\",\n default=True,\n type=bool,\n help=(\"Infer a target's dependencies by parsing consumed types from sources.\"),\n )\n register(\n \"--third-party-imports\",\n default=True,\n type=bool,\n help=\"Infer a target's third-party dependencies using Java import statements.\",\n )\n _default_package_mapping_url = git_url(\n \"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py\"\n )\n # TODO: Move to `coursier` or a generic `jvm` subsystem.\n register(\n \"--third-party-import-mapping\",\n type=dict,\n help=(\n \"A dictionary mapping a Java package path to a JVM artifact coordinate (GROUP:ARTIFACT) \"\n \"without the version. The package path may be made recursive to match symbols in subpackages \"\n \"by adding `.**` to the end of the package path. For example, specify `{'org.junit.**': 'junit:junit'} `\"\n \"to infer a dependency on junit:junit for any file importing a symbol from org.junit or its \"\n f\"subpackages. 
Pants also supplies a default package mapping ({_default_package_mapping_url}).\"\n ),\n )\n\n @property\n def imports(self) -> bool:\n return cast(bool, self.options.imports)\n\n @property\n def consumed_types(self) -> bool:\n return cast(bool, self.options.consumed_types)\n\n @property\n def third_party_imports(self) -> bool:\n return cast(bool, self.options.third_party_imports)\n\n @property\n def third_party_import_mapping(self) -> dict:\n return cast(dict, self.options.third_party_import_mapping)\n", "path": "src/python/pants/backend/java/subsystems/java_infer.py"}]} | 1,706 | 572 |
gh_patches_debug_9049 | rasdani/github-patches | git_diff | avocado-framework__avocado-714 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Proper simple tests examples
Even though simple tests are, well, simple, let's have a couple of them in the examples directory.
A big reason for that is that we currently use wrappers as the simple test examples in the Getting Started guide (`avocado list examples/wrappers`), which can be confusing to new users.
--- END ISSUE ---
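For context, an Avocado "simple test" is just an executable whose exit status determines the result: exit code 0 is reported as PASS, anything else as FAIL. A minimal sketch of what such an example could look like is below; the file name and location are hypothetical and not taken from the repository (the existing wrapper examples packaged by this repository are shell scripts):

```python
#!/usr/bin/env python
# Hypothetical examples/simpletests/passtest.py -- illustrative only.
# Avocado runs a simple test as a plain executable:
# exit status 0 => PASS, non-zero => FAIL.
import sys

if __name__ == '__main__':
    sys.exit(0)  # report PASS
```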
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/bin/env python
2 # This program is free software; you can redistribute it and/or modify
3 # it under the terms of the GNU General Public License as published by
4 # the Free Software Foundation; either version 2 of the License, or
5 # (at your option) any later version.
6 #
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
10 #
11 # See LICENSE for more details.
12 #
13 # Copyright: Red Hat Inc. 2013-2014
14 # Author: Lucas Meneghel Rodrigues <[email protected]>
15
16 import glob
17 import os
18 # pylint: disable=E0611
19
20 from distutils.core import setup
21
22 from avocado import VERSION
23
24
25 VIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ
26
27
28 def get_dir(system_path=None, virtual_path=None):
29 """
30 Retrieve VIRTUAL_ENV friendly path
31 :param system_path: Relative system path
32 :param virtual_path: Overrides system_path for virtual_env only
33 :return: VIRTUAL_ENV friendly path
34 """
35 if virtual_path is None:
36 virtual_path = system_path
37 if VIRTUAL_ENV:
38 if virtual_path is None:
39 virtual_path = []
40 return os.path.join(*virtual_path)
41 else:
42 if system_path is None:
43 system_path = []
44 return os.path.join(*(['/'] + system_path))
45
46
47 def get_tests_dir():
48 return get_dir(['usr', 'share', 'avocado', 'tests'], ['tests'])
49
50
51 def get_avocado_libexec_dir():
52 if VIRTUAL_ENV:
53 return get_dir(['libexec'])
54 elif os.path.exists('/usr/libexec'): # RHEL-like distro
55 return get_dir(['usr', 'libexec', 'avocado'])
56 else: # Debian-like distro
57 return get_dir(['usr', 'lib', 'avocado'])
58
59
60 def get_data_files():
61 data_files = [(get_dir(['etc', 'avocado']), ['etc/avocado/avocado.conf'])]
62 data_files += [(get_dir(['etc', 'avocado', 'conf.d']),
63 ['etc/avocado/conf.d/README', 'etc/avocado/conf.d/gdb.conf'])]
64 data_files += [(get_dir(['etc', 'avocado', 'sysinfo']),
65 ['etc/avocado/sysinfo/commands', 'etc/avocado/sysinfo/files',
66 'etc/avocado/sysinfo/profilers'])]
67 data_files += [(get_tests_dir(), glob.glob('examples/tests/*.py'))]
68 for data_dir in glob.glob('examples/tests/*.data'):
69 fmt_str = '%s/*' % data_dir
70 for f in glob.glob(fmt_str):
71 data_files += [(os.path.join(get_tests_dir(),
72 os.path.basename(data_dir)), [f])]
73 data_files.append((get_dir(['usr', 'share', 'doc', 'avocado'], ['.']),
74 ['man/avocado.rst', 'man/avocado-rest-client.rst']))
75 data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],
76 ['wrappers']),
77 glob.glob('examples/wrappers/*.sh'))]
78 data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))
79 return data_files
80
81
82 def _get_plugin_resource_files(path):
83 """
84 Given a path, return all the files in there to package
85 """
86 flist = []
87 for root, _, files in sorted(os.walk(path)):
88 for name in files:
89 fullname = os.path.join(root, name)
90 flist.append(fullname[len('avocado/core/plugins/'):])
91 return flist
92
93
94 def get_long_description():
95 with open('README.rst', 'r') as req:
96 req_contents = req.read()
97 return req_contents
98
99 if __name__ == '__main__':
100 setup(name='avocado',
101 version=VERSION,
102 description='Avocado Test Framework',
103 long_description=get_long_description(),
104 author='Avocado Developers',
105 author_email='[email protected]',
106 url='http://avocado-framework.github.io/',
107 packages=['avocado',
108 'avocado.core',
109 'avocado.core.plugins',
110 'avocado.utils',
111 'avocado.utils.external',
112 'avocado.core.remote',
113 'avocado.core.restclient',
114 'avocado.core.restclient.cli',
115 'avocado.core.restclient.cli.args',
116 'avocado.core.restclient.cli.actions'],
117 package_data={'avocado.core.plugins': _get_plugin_resource_files(
118 'avocado/core/plugins/resources')},
119 data_files=get_data_files(),
120 scripts=['scripts/avocado',
121 'scripts/avocado-rest-client'])
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -75,6 +75,11 @@
data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],
['wrappers']),
glob.glob('examples/wrappers/*.sh'))]
+
+ data_files += [(get_dir(['usr', 'share', 'avocado', 'simpletests'],
+ ['simpletests']),
+ glob.glob('examples/simpletests/*.sh'))]
+
data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))
return data_files
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -75,6 +75,11 @@\n data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],\n ['wrappers']),\n glob.glob('examples/wrappers/*.sh'))]\n+\n+ data_files += [(get_dir(['usr', 'share', 'avocado', 'simpletests'],\n+ ['simpletests']),\n+ glob.glob('examples/simpletests/*.sh'))]\n+\n data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))\n return data_files\n", "issue": "Proper simple tests examples\nEven though simple tests are, well, simple, let's have a couple of them in the examples directory.\n\nA big reason for that is that we currently use wrappers as the simple tests examples in the Getting Started guide (`avocado list examples/wrappers`) which can be confusing to new users.\n\n", "before_files": [{"content": "#!/bin/env python\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2013-2014\n# Author: Lucas Meneghel Rodrigues <[email protected]>\n\nimport glob\nimport os\n# pylint: disable=E0611\n\nfrom distutils.core import setup\n\nfrom avocado import VERSION\n\n\nVIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ\n\n\ndef get_dir(system_path=None, virtual_path=None):\n \"\"\"\n Retrieve VIRTUAL_ENV friendly path\n :param system_path: Relative system path\n :param virtual_path: Overrides system_path for virtual_env only\n :return: VIRTUAL_ENV friendly path\n \"\"\"\n if virtual_path is None:\n virtual_path = system_path\n if VIRTUAL_ENV:\n if virtual_path is None:\n virtual_path = []\n return os.path.join(*virtual_path)\n else:\n if system_path is None:\n system_path = []\n return os.path.join(*(['/'] + system_path))\n\n\ndef get_tests_dir():\n return get_dir(['usr', 'share', 'avocado', 'tests'], ['tests'])\n\n\ndef get_avocado_libexec_dir():\n if VIRTUAL_ENV:\n return get_dir(['libexec'])\n elif os.path.exists('/usr/libexec'): # RHEL-like distro\n return get_dir(['usr', 'libexec', 'avocado'])\n else: # Debian-like distro\n return get_dir(['usr', 'lib', 'avocado'])\n\n\ndef get_data_files():\n data_files = [(get_dir(['etc', 'avocado']), ['etc/avocado/avocado.conf'])]\n data_files += [(get_dir(['etc', 'avocado', 'conf.d']),\n ['etc/avocado/conf.d/README', 'etc/avocado/conf.d/gdb.conf'])]\n data_files += [(get_dir(['etc', 'avocado', 'sysinfo']),\n ['etc/avocado/sysinfo/commands', 'etc/avocado/sysinfo/files',\n 'etc/avocado/sysinfo/profilers'])]\n data_files += [(get_tests_dir(), glob.glob('examples/tests/*.py'))]\n for data_dir in glob.glob('examples/tests/*.data'):\n fmt_str = '%s/*' % data_dir\n for f in glob.glob(fmt_str):\n data_files += [(os.path.join(get_tests_dir(),\n os.path.basename(data_dir)), [f])]\n data_files.append((get_dir(['usr', 'share', 'doc', 'avocado'], ['.']),\n ['man/avocado.rst', 'man/avocado-rest-client.rst']))\n data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],\n ['wrappers']),\n glob.glob('examples/wrappers/*.sh'))]\n data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))\n return data_files\n\n\ndef _get_plugin_resource_files(path):\n \"\"\"\n Given a path, return all the files in 
there to package\n \"\"\"\n flist = []\n for root, _, files in sorted(os.walk(path)):\n for name in files:\n fullname = os.path.join(root, name)\n flist.append(fullname[len('avocado/core/plugins/'):])\n return flist\n\n\ndef get_long_description():\n with open('README.rst', 'r') as req:\n req_contents = req.read()\n return req_contents\n\nif __name__ == '__main__':\n setup(name='avocado',\n version=VERSION,\n description='Avocado Test Framework',\n long_description=get_long_description(),\n author='Avocado Developers',\n author_email='[email protected]',\n url='http://avocado-framework.github.io/',\n packages=['avocado',\n 'avocado.core',\n 'avocado.core.plugins',\n 'avocado.utils',\n 'avocado.utils.external',\n 'avocado.core.remote',\n 'avocado.core.restclient',\n 'avocado.core.restclient.cli',\n 'avocado.core.restclient.cli.args',\n 'avocado.core.restclient.cli.actions'],\n package_data={'avocado.core.plugins': _get_plugin_resource_files(\n 'avocado/core/plugins/resources')},\n data_files=get_data_files(),\n scripts=['scripts/avocado',\n 'scripts/avocado-rest-client'])\n", "path": "setup.py"}], "after_files": [{"content": "#!/bin/env python\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2013-2014\n# Author: Lucas Meneghel Rodrigues <[email protected]>\n\nimport glob\nimport os\n# pylint: disable=E0611\n\nfrom distutils.core import setup\n\nfrom avocado import VERSION\n\n\nVIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ\n\n\ndef get_dir(system_path=None, virtual_path=None):\n \"\"\"\n Retrieve VIRTUAL_ENV friendly path\n :param system_path: Relative system path\n :param virtual_path: Overrides system_path for virtual_env only\n :return: VIRTUAL_ENV friendly path\n \"\"\"\n if virtual_path is None:\n virtual_path = system_path\n if VIRTUAL_ENV:\n if virtual_path is None:\n virtual_path = []\n return os.path.join(*virtual_path)\n else:\n if system_path is None:\n system_path = []\n return os.path.join(*(['/'] + system_path))\n\n\ndef get_tests_dir():\n return get_dir(['usr', 'share', 'avocado', 'tests'], ['tests'])\n\n\ndef get_avocado_libexec_dir():\n if VIRTUAL_ENV:\n return get_dir(['libexec'])\n elif os.path.exists('/usr/libexec'): # RHEL-like distro\n return get_dir(['usr', 'libexec', 'avocado'])\n else: # Debian-like distro\n return get_dir(['usr', 'lib', 'avocado'])\n\n\ndef get_data_files():\n data_files = [(get_dir(['etc', 'avocado']), ['etc/avocado/avocado.conf'])]\n data_files += [(get_dir(['etc', 'avocado', 'conf.d']),\n ['etc/avocado/conf.d/README', 'etc/avocado/conf.d/gdb.conf'])]\n data_files += [(get_dir(['etc', 'avocado', 'sysinfo']),\n ['etc/avocado/sysinfo/commands', 'etc/avocado/sysinfo/files',\n 'etc/avocado/sysinfo/profilers'])]\n data_files += [(get_tests_dir(), glob.glob('examples/tests/*.py'))]\n for data_dir in glob.glob('examples/tests/*.data'):\n fmt_str = '%s/*' % data_dir\n for f in glob.glob(fmt_str):\n data_files += [(os.path.join(get_tests_dir(),\n os.path.basename(data_dir)), [f])]\n data_files.append((get_dir(['usr', 'share', 'doc', 'avocado'], ['.']),\n ['man/avocado.rst', 'man/avocado-rest-client.rst']))\n 
data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],\n ['wrappers']),\n glob.glob('examples/wrappers/*.sh'))]\n\n data_files += [(get_dir(['usr', 'share', 'avocado', 'simpletests'],\n ['simpletests']),\n glob.glob('examples/simpletests/*.sh'))]\n\n data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))\n return data_files\n\n\ndef _get_plugin_resource_files(path):\n \"\"\"\n Given a path, return all the files in there to package\n \"\"\"\n flist = []\n for root, _, files in sorted(os.walk(path)):\n for name in files:\n fullname = os.path.join(root, name)\n flist.append(fullname[len('avocado/core/plugins/'):])\n return flist\n\n\ndef get_long_description():\n with open('README.rst', 'r') as req:\n req_contents = req.read()\n return req_contents\n\nif __name__ == '__main__':\n setup(name='avocado',\n version=VERSION,\n description='Avocado Test Framework',\n long_description=get_long_description(),\n author='Avocado Developers',\n author_email='[email protected]',\n url='http://avocado-framework.github.io/',\n packages=['avocado',\n 'avocado.core',\n 'avocado.core.plugins',\n 'avocado.utils',\n 'avocado.utils.external',\n 'avocado.core.remote',\n 'avocado.core.restclient',\n 'avocado.core.restclient.cli',\n 'avocado.core.restclient.cli.args',\n 'avocado.core.restclient.cli.actions'],\n package_data={'avocado.core.plugins': _get_plugin_resource_files(\n 'avocado/core/plugins/resources')},\n data_files=get_data_files(),\n scripts=['scripts/avocado',\n 'scripts/avocado-rest-client'])\n", "path": "setup.py"}]} | 1,608 | 137 |
gh_patches_debug_19668 | rasdani/github-patches | git_diff | docker__docker-py-1050 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
login failed with default registry
I am using docker-py (1.8.0) and trying to use the login API.
If I don't pass `registry='https://index.docker.io/v1/'`, it raises an exception as follows:
```
docker.errors.APIError: 500 Server Error: Internal Server Error ("Unexpected status code [301] :")
```
But I saw https://github.com/docker/docker-py/blob/81edb398ebf7ce5c7ef14aa0739de5329589aabe/docker/api/daemon.py#L52 in the source code, so login should work with the default registry.
--- END ISSUE ---
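For context, a minimal reproduction of the reported behaviour might look like the sketch below. The client setup and credentials are placeholders (assumptions, not taken from the report); the `login()` signature matches the `daemon.py` source shown further down:

```python
# Hypothetical reproduction against docker-py 1.8.0; socket path and
# credentials are placeholder values.
import docker

client = docker.Client(base_url='unix://var/run/docker.sock')

# Works: the registry is passed explicitly.
client.login(username='someuser', password='secret',
             registry='https://index.docker.io/v1/')

# Reported to fail with APIError 500 ("Unexpected status code [301]"):
# no registry is given, so login() substitutes auth.INDEX_URL and posts it
# as `serveraddress`.
client.login(username='someuser', password='secret')
```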
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/daemon.py`
Content:
```
1 import os
2 import warnings
3 from datetime import datetime
4
5 from ..auth import auth
6 from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
7 from ..utils import utils
8
9
10 class DaemonApiMixin(object):
11 def events(self, since=None, until=None, filters=None, decode=None):
12 if isinstance(since, datetime):
13 since = utils.datetime_to_timestamp(since)
14
15 if isinstance(until, datetime):
16 until = utils.datetime_to_timestamp(until)
17
18 if filters:
19 filters = utils.convert_filters(filters)
20
21 params = {
22 'since': since,
23 'until': until,
24 'filters': filters
25 }
26
27 return self._stream_helper(
28 self.get(self._url('/events'), params=params, stream=True),
29 decode=decode
30 )
31
32 def info(self):
33 return self._result(self._get(self._url("/info")), True)
34
35 def login(self, username, password=None, email=None, registry=None,
36 reauth=False, insecure_registry=False, dockercfg_path=None):
37 if insecure_registry:
38 warnings.warn(
39 INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
40 DeprecationWarning
41 )
42
43 # If we don't have any auth data so far, try reloading the config file
44 # one more time in case anything showed up in there.
45 # If dockercfg_path is passed check to see if the config file exists,
46 # if so load that config.
47 if dockercfg_path and os.path.exists(dockercfg_path):
48 self._auth_configs = auth.load_config(dockercfg_path)
49 elif not self._auth_configs:
50 self._auth_configs = auth.load_config()
51
52 registry = registry or auth.INDEX_URL
53
54 authcfg = auth.resolve_authconfig(self._auth_configs, registry)
55 # If we found an existing auth config for this registry and username
56 # combination, we can return it immediately unless reauth is requested.
57 if authcfg and authcfg.get('username', None) == username \
58 and not reauth:
59 return authcfg
60
61 req_data = {
62 'username': username,
63 'password': password,
64 'email': email,
65 'serveraddress': registry,
66 }
67
68 response = self._post_json(self._url('/auth'), data=req_data)
69 if response.status_code == 200:
70 self._auth_configs[registry] = req_data
71 return self._result(response, json=True)
72
73 def ping(self):
74 return self._result(self._get(self._url('/_ping')))
75
76 def version(self, api_version=True):
77 url = self._url("/version", versioned_api=api_version)
78 return self._result(self._get(url), json=True)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/api/daemon.py b/docker/api/daemon.py
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -49,8 +49,6 @@
elif not self._auth_configs:
self._auth_configs = auth.load_config()
- registry = registry or auth.INDEX_URL
-
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
@@ -67,7 +65,7 @@
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
- self._auth_configs[registry] = req_data
+ self._auth_configs[registry or auth.INDEX_NAME] = req_data
return self._result(response, json=True)
def ping(self):
| {"golden_diff": "diff --git a/docker/api/daemon.py b/docker/api/daemon.py\n--- a/docker/api/daemon.py\n+++ b/docker/api/daemon.py\n@@ -49,8 +49,6 @@\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n \n- registry = registry or auth.INDEX_URL\n-\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n@@ -67,7 +65,7 @@\n \n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n- self._auth_configs[registry] = req_data\n+ self._auth_configs[registry or auth.INDEX_NAME] = req_data\n return self._result(response, json=True)\n \n def ping(self):\n", "issue": "login failed with default registry\nI am using docker-py (1.8.0) and trying to using login API.\n\nIf I don't input `registry='https://index.docker.io/v1/'` .\nIt will raise exception as following:\n\n```\ndocker.errors.APIError: 500 Server Error: Internal Server Error (\"Unexpected status code [301] :\")\n```\n\nBut I saw https://github.com/docker/docker-py/blob/81edb398ebf7ce5c7ef14aa0739de5329589aabe/docker/api/daemon.py#L52 in source code. Should work with default registry.\n\n", "before_files": [{"content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom ..auth import auth\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\nfrom ..utils import utils\n\n\nclass DaemonApiMixin(object):\n def events(self, since=None, until=None, filters=None, decode=None):\n if isinstance(since, datetime):\n since = utils.datetime_to_timestamp(since)\n\n if isinstance(until, datetime):\n until = utils.datetime_to_timestamp(until)\n\n if filters:\n filters = utils.convert_filters(filters)\n\n params = {\n 'since': since,\n 'until': until,\n 'filters': filters\n }\n\n return self._stream_helper(\n self.get(self._url('/events'), params=params, stream=True),\n decode=decode\n )\n\n def info(self):\n return self._result(self._get(self._url(\"/info\")), True)\n\n def login(self, username, password=None, email=None, registry=None,\n reauth=False, insecure_registry=False, dockercfg_path=None):\n if insecure_registry:\n warnings.warn(\n INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),\n DeprecationWarning\n )\n\n # If we don't have any auth data so far, try reloading the config file\n # one more time in case anything showed up in there.\n # If dockercfg_path is passed check to see if the config file exists,\n # if so load that config.\n if dockercfg_path and os.path.exists(dockercfg_path):\n self._auth_configs = auth.load_config(dockercfg_path)\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n\n registry = registry or auth.INDEX_URL\n\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n if authcfg and authcfg.get('username', None) == username \\\n and not reauth:\n return authcfg\n\n req_data = {\n 'username': username,\n 'password': password,\n 'email': email,\n 'serveraddress': registry,\n }\n\n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n self._auth_configs[registry] = req_data\n return self._result(response, json=True)\n\n def ping(self):\n return self._result(self._get(self._url('/_ping')))\n\n def version(self, api_version=True):\n url = self._url(\"/version\", 
versioned_api=api_version)\n return self._result(self._get(url), json=True)\n", "path": "docker/api/daemon.py"}], "after_files": [{"content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom ..auth import auth\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\nfrom ..utils import utils\n\n\nclass DaemonApiMixin(object):\n def events(self, since=None, until=None, filters=None, decode=None):\n if isinstance(since, datetime):\n since = utils.datetime_to_timestamp(since)\n\n if isinstance(until, datetime):\n until = utils.datetime_to_timestamp(until)\n\n if filters:\n filters = utils.convert_filters(filters)\n\n params = {\n 'since': since,\n 'until': until,\n 'filters': filters\n }\n\n return self._stream_helper(\n self.get(self._url('/events'), params=params, stream=True),\n decode=decode\n )\n\n def info(self):\n return self._result(self._get(self._url(\"/info\")), True)\n\n def login(self, username, password=None, email=None, registry=None,\n reauth=False, insecure_registry=False, dockercfg_path=None):\n if insecure_registry:\n warnings.warn(\n INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),\n DeprecationWarning\n )\n\n # If we don't have any auth data so far, try reloading the config file\n # one more time in case anything showed up in there.\n # If dockercfg_path is passed check to see if the config file exists,\n # if so load that config.\n if dockercfg_path and os.path.exists(dockercfg_path):\n self._auth_configs = auth.load_config(dockercfg_path)\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n if authcfg and authcfg.get('username', None) == username \\\n and not reauth:\n return authcfg\n\n req_data = {\n 'username': username,\n 'password': password,\n 'email': email,\n 'serveraddress': registry,\n }\n\n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n self._auth_configs[registry or auth.INDEX_NAME] = req_data\n return self._result(response, json=True)\n\n def ping(self):\n return self._result(self._get(self._url('/_ping')))\n\n def version(self, api_version=True):\n url = self._url(\"/version\", versioned_api=api_version)\n return self._result(self._get(url), json=True)\n", "path": "docker/api/daemon.py"}]} | 1,140 | 208 |
gh_patches_debug_29586 | rasdani/github-patches | git_diff | blaze__blaze-1114 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dask test failure
It seems `atop` is being called using an older dask API.
```
================================================================================= FAILURES ==================================================================================
____________________________________________________________________________ test_compute[expr0] ____________________________________________________________________________
blaze/compute/tests/test_dask.py:69: in test_compute
result = compute(expr, dask_ns)
../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__
return func(*args, **kwargs)
blaze/compute/core.py:470: in compute
result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)
blaze/compute/core.py:164: in top_then_bottom_then_top_again_etc
expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)
blaze/compute/core.py:371: in bottom_up_until_type_break
**kwargs)}
../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__
return func(*args, **kwargs)
blaze/compute/dask.py:40: in compute_broadcast
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
../../../../code/py/dask/dask/array/core.py:1099: in atop
numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])
```
--- END ISSUE ---
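For context, the shape of the failure can be illustrated with a small stand-in for `atop`. The helper below is a simplified assumption about how newer dask pairs its positional arguments into `(array, index)` tuples; it is not the real dask implementation:

```python
# Simplified sketch (assumed behaviour, not actual dask source) showing why a
# stale extra "name" argument breaks the (array, index) pairing in atop().
from toolz import partition


class FakeDaskArray(object):
    def __init__(self, name, numblocks):
        self.name = name
        self.numblocks = numblocks


def atop_like(func, out_ind, *args):
    # Newer-style atop: everything after the output index is (array, index) pairs.
    arginds = list(partition(2, args))
    return dict((a.name, a.numblocks) for a, ind in arginds)


x = FakeDaskArray('x_1', (2,))

print(atop_like(sum, 'i', x, 'i'))  # new-style call works: {'x_1': (2,)}

try:
    # An old blaze-style call passes an extra explicit name (`next(names)`), so
    # a plain string lands where an array is expected and .name/.numblocks fail.
    atop_like(sum, 'tmp_0', 'i', x, 'i')
except AttributeError as exc:
    print(exc)
```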
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `blaze/compute/dask.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 from numbers import Number
4 from toolz import concat, first, curry, compose
5 from datashape import DataShape
6
7 from blaze import compute, ndim
8 from blaze.dispatch import dispatch
9 from blaze.compute.core import compute_up, optimize
10 from blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,
11 Expr, Slice, Broadcast)
12 from blaze.expr.split import split
13
14 from dask.array.core import (_concatenate2, Array, atop, names, transpose,
15 tensordot)
16
17
18 def compute_it(expr, leaves, *data, **kwargs):
19 kwargs.pop('scope')
20 return compute(expr, dict(zip(leaves, data)), **kwargs)
21
22
23 def elemwise_array(expr, *data, **kwargs):
24 leaves = expr._inputs
25 expr_inds = tuple(range(ndim(expr)))[::-1]
26 return atop(curry(compute_it, expr, leaves, **kwargs),
27 next(names), expr_inds,
28 *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
29
30
31 try:
32 from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,
33 Broadcastable)
34
35 def compute_broadcast(expr, *data, **kwargs):
36 expr_inds = tuple(range(ndim(expr)))[::-1]
37 func = get_numba_ufunc(expr)
38 return atop(func,
39 next(names), expr_inds,
40 *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
41
42 def optimize_array(expr, *data):
43 return broadcast_collect(expr, Broadcastable=Broadcastable,
44 WantToBroadcast=Broadcastable)
45
46 for i in range(5):
47 compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)
48 optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)
49
50 except ImportError:
51 pass
52
53
54 for i in range(5):
55 compute_up.register(ElemWise, *([Array] * i))(elemwise_array)
56
57
58 @dispatch(Reduction, Array)
59 def compute_up(expr, data, **kwargs):
60 leaf = expr._leaves()[0]
61 chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +
62 (leaf.dshape.measure,))))
63 (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr,
64 chunk=chunk)
65
66 inds = tuple(range(ndim(leaf)))
67 tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),
68 next(names), inds,
69 data, inds)
70
71 return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),
72 curry(_concatenate2, axes=expr.axis)),
73 next(names), tuple(i for i in inds if i not in expr.axis),
74 tmp, inds)
75
76
77 @dispatch(Transpose, Array)
78 def compute_up(expr, data, **kwargs):
79 return transpose(data, expr.axes)
80
81
82 @dispatch(TensorDot, Array, Array)
83 def compute_up(expr, lhs, rhs, **kwargs):
84 return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))
85
86
87 @dispatch(Slice, Array)
88 def compute_up(expr, data, **kwargs):
89 return data[expr.index]
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/blaze/compute/dask.py b/blaze/compute/dask.py
--- a/blaze/compute/dask.py
+++ b/blaze/compute/dask.py
@@ -24,7 +24,7 @@
leaves = expr._inputs
expr_inds = tuple(range(ndim(expr)))[::-1]
return atop(curry(compute_it, expr, leaves, **kwargs),
- next(names), expr_inds,
+ expr_inds,
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
@@ -36,7 +36,7 @@
expr_inds = tuple(range(ndim(expr)))[::-1]
func = get_numba_ufunc(expr)
return atop(func,
- next(names), expr_inds,
+ expr_inds,
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
def optimize_array(expr, *data):
@@ -64,13 +64,12 @@
chunk=chunk)
inds = tuple(range(ndim(leaf)))
- tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),
- next(names), inds,
- data, inds)
+ tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,
+ inds)
return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),
curry(_concatenate2, axes=expr.axis)),
- next(names), tuple(i for i in inds if i not in expr.axis),
+ tuple(i for i in inds if i not in expr.axis),
tmp, inds)
| {"golden_diff": "diff --git a/blaze/compute/dask.py b/blaze/compute/dask.py\n--- a/blaze/compute/dask.py\n+++ b/blaze/compute/dask.py\n@@ -24,7 +24,7 @@\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n- next(names), expr_inds,\n+ expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n \n \n@@ -36,7 +36,7 @@\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n- next(names), expr_inds,\n+ expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n \n def optimize_array(expr, *data):\n@@ -64,13 +64,12 @@\n chunk=chunk)\n \n inds = tuple(range(ndim(leaf)))\n- tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),\n- next(names), inds,\n- data, inds)\n+ tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,\n+ inds)\n \n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n- next(names), tuple(i for i in inds if i not in expr.axis),\n+ tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n", "issue": "dask test failure\nit seems `atop` is using an older dask API\n\n```\n================================================================================= FAILURES ==================================================================================\n____________________________________________________________________________ test_compute[expr0] ____________________________________________________________________________\nblaze/compute/tests/test_dask.py:69: in test_compute\n result = compute(expr, dask_ns)\n../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__\n return func(*args, **kwargs)\nblaze/compute/core.py:470: in compute\n result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)\nblaze/compute/core.py:164: in top_then_bottom_then_top_again_etc\n expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)\nblaze/compute/core.py:371: in bottom_up_until_type_break\n **kwargs)}\n../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__\n return func(*args, **kwargs)\nblaze/compute/dask.py:40: in compute_broadcast\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n../../../../code/py/dask/dask/array/core.py:1099: in atop\n numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom numbers import Number\nfrom toolz import concat, first, curry, compose\nfrom datashape import DataShape\n\nfrom blaze import compute, ndim\nfrom blaze.dispatch import dispatch\nfrom blaze.compute.core import compute_up, optimize\nfrom blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,\n Expr, Slice, Broadcast)\nfrom blaze.expr.split import split\n\nfrom dask.array.core import (_concatenate2, Array, atop, names, transpose,\n tensordot)\n\n\ndef compute_it(expr, leaves, *data, **kwargs):\n kwargs.pop('scope')\n return compute(expr, dict(zip(leaves, data)), **kwargs)\n\n\ndef elemwise_array(expr, *data, **kwargs):\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n next(names), expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n\ntry:\n from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,\n 
Broadcastable)\n\n def compute_broadcast(expr, *data, **kwargs):\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n next(names), expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n def optimize_array(expr, *data):\n return broadcast_collect(expr, Broadcastable=Broadcastable,\n WantToBroadcast=Broadcastable)\n\n for i in range(5):\n compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)\n optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)\n\nexcept ImportError:\n pass\n\n\nfor i in range(5):\n compute_up.register(ElemWise, *([Array] * i))(elemwise_array)\n\n\n@dispatch(Reduction, Array)\ndef compute_up(expr, data, **kwargs):\n leaf = expr._leaves()[0]\n chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +\n (leaf.dshape.measure,))))\n (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr,\n chunk=chunk)\n\n inds = tuple(range(ndim(leaf)))\n tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),\n next(names), inds,\n data, inds)\n\n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n next(names), tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n\n\n@dispatch(Transpose, Array)\ndef compute_up(expr, data, **kwargs):\n return transpose(data, expr.axes)\n\n\n@dispatch(TensorDot, Array, Array)\ndef compute_up(expr, lhs, rhs, **kwargs):\n return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))\n\n\n@dispatch(Slice, Array)\ndef compute_up(expr, data, **kwargs):\n return data[expr.index]\n", "path": "blaze/compute/dask.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom numbers import Number\nfrom toolz import concat, first, curry, compose\nfrom datashape import DataShape\n\nfrom blaze import compute, ndim\nfrom blaze.dispatch import dispatch\nfrom blaze.compute.core import compute_up, optimize\nfrom blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,\n Expr, Slice, Broadcast)\nfrom blaze.expr.split import split\n\nfrom dask.array.core import (_concatenate2, Array, atop, names, transpose,\n tensordot)\n\n\ndef compute_it(expr, leaves, *data, **kwargs):\n kwargs.pop('scope')\n return compute(expr, dict(zip(leaves, data)), **kwargs)\n\n\ndef elemwise_array(expr, *data, **kwargs):\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n\ntry:\n from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,\n Broadcastable)\n\n def compute_broadcast(expr, *data, **kwargs):\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n def optimize_array(expr, *data):\n return broadcast_collect(expr, Broadcastable=Broadcastable,\n WantToBroadcast=Broadcastable)\n\n for i in range(5):\n compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)\n optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)\n\nexcept ImportError:\n pass\n\n\nfor i in range(5):\n compute_up.register(ElemWise, *([Array] * i))(elemwise_array)\n\n\n@dispatch(Reduction, Array)\ndef compute_up(expr, data, **kwargs):\n leaf = expr._leaves()[0]\n chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +\n (leaf.dshape.measure,))))\n (chunk, 
chunk_expr), (agg, agg_expr) = split(expr._child, expr,\n chunk=chunk)\n\n inds = tuple(range(ndim(leaf)))\n tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,\n inds)\n\n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n\n\n@dispatch(Transpose, Array)\ndef compute_up(expr, data, **kwargs):\n return transpose(data, expr.axes)\n\n\n@dispatch(TensorDot, Array, Array)\ndef compute_up(expr, lhs, rhs, **kwargs):\n return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))\n\n\n@dispatch(Slice, Array)\ndef compute_up(expr, data, **kwargs):\n return data[expr.index]\n", "path": "blaze/compute/dask.py"}]} | 1,478 | 370 |
gh_patches_debug_14632 | rasdani/github-patches | git_diff | PyGithub__PyGithub-1053 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dismiss a PR review?
Am I reading the docs correctly and understanding that there is no support for [dismissing a PR review](https://developer.github.com/v3/pulls/reviews/#dismiss-a-pull-request-review)?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `github/PullRequestReview.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2017 Aaron Levine <[email protected]> #
6 # Copyright 2017 Mike Miller <[email protected]> #
7 # Copyright 2018 Darragh Bailey <[email protected]> #
8 # Copyright 2018 Wan Liuyang <[email protected]> #
9 # Copyright 2018 sfdye <[email protected]> #
10 # #
11 # This file is part of PyGithub. #
12 # http://pygithub.readthedocs.io/ #
13 # #
14 # PyGithub is free software: you can redistribute it and/or modify it under #
15 # the terms of the GNU Lesser General Public License as published by the Free #
16 # Software Foundation, either version 3 of the License, or (at your option) #
17 # any later version. #
18 # #
19 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
20 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
21 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
22 # details. #
23 # #
24 # You should have received a copy of the GNU Lesser General Public License #
25 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
26 # #
27 ################################################################################
28
29 import github.GithubObject
30
31 import github.NamedUser
32
33
34 class PullRequestReview(github.GithubObject.CompletableGithubObject):
35 """
36 This class represents PullRequestReviews. The reference can be found here https://developer.github.com/v3/pulls/reviews/
37 """
38
39 def __repr__(self):
40 return self.get__repr__({"id": self._id.value, "user": self._user.value})
41
42 @property
43 def id(self):
44 """
45 :type: integer
46 """
47 self._completeIfNotSet(self._id)
48 return self._id.value
49
50 @property
51 def user(self):
52 """
53 :type: :class:`github.NamedUser.NamedUser`
54 """
55 self._completeIfNotSet(self._user)
56 return self._user.value
57
58 @property
59 def body(self):
60 """
61 :type: string
62 """
63 self._completeIfNotSet(self._body)
64 return self._body.value
65
66 @property
67 def commit_id(self):
68 """
69 :type: string
70 """
71 self._completeIfNotSet(self._commit_id)
72 return self._commit_id.value
73
74 @property
75 def state(self):
76 """
77 :type: string
78 """
79 self._completeIfNotSet(self._state)
80 return self._state.value
81
82 @property
83 def url(self):
84 """
85 :type: string
86 """
87 self._completeIfNotSet(self._url)
88 return self._url.value
89
90 @property
91 def html_url(self):
92 """
93 :type: string
94 """
95 self._completeIfNotSet(self._html_url)
96 return self._html_url.value
97
98 @property
99 def pull_request_url(self):
100 """
101 :type: string
102 """
103 self._completeIfNotSet(self._pull_request_url)
104 return self._pull_request_url.value
105
106 @property
107 def submitted_at(self):
108 """
109 :type: datetime.datetime
110 """
111 self._completeIfNotSet(self._submitted_at)
112 return self._submitted_at.value
113
114 def _initAttributes(self):
115 self._id = github.GithubObject.NotSet
116 self._user = github.GithubObject.NotSet
117 self._body = github.GithubObject.NotSet
118 self._commit_id = github.GithubObject.NotSet
119 self._state = github.GithubObject.NotSet
120 self._url = github.GithubObject.NotSet
121 self._html_url = github.GithubObject.NotSet
122 self._pull_request_url = github.GithubObject.NotSet
123 self._submitted_at = github.GithubObject.NotSet
124
125 def _useAttributes(self, attributes):
126 if "id" in attributes: # pragma no branch
127 self._id = self._makeIntAttribute(attributes["id"])
128 if "user" in attributes: # pragma no branch
129 self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
130 if "body" in attributes: # pragma no branch
131 self._body = self._makeStringAttribute(attributes["body"])
132 if "commit_id" in attributes: # pragma no branch
133 self._commit_id = self._makeStringAttribute(attributes["commit_id"])
134 if "state" in attributes: # pragma no branch
135 self._state = self._makeStringAttribute(attributes["state"])
136 if "url" in attributes: # pragma no branch
137 self._url = self._makeStringAttribute(attributes["url"])
138 if "html_url" in attributes: # pragma no branch
139 self._html_url = self._makeStringAttribute(attributes["html_url"])
140 if "pull_request_url" in attributes: # pragma no branch
141 self._pull_request_url = self._makeStringAttribute(attributes["pull_request_url"])
142 if "submitted_at" in attributes: # pragma no branch
143 self._submitted_at = self._makeDatetimeAttribute(attributes["submitted_at"])
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/github/PullRequestReview.py b/github/PullRequestReview.py
--- a/github/PullRequestReview.py
+++ b/github/PullRequestReview.py
@@ -111,6 +111,19 @@
self._completeIfNotSet(self._submitted_at)
return self._submitted_at.value
+ def dismiss(self, message):
+ """
+ :calls: `PUT /repos/:owner/:repo/pulls/:number/reviews/:review_id/dismissals <https://developer.github.com/v3/pulls/reviews/>`_
+ :rtype: None
+ """
+ assert isinstance(message, (str, unicode)), message
+ post_parameters = {'message': message}
+ headers, data = self._requester.requestJsonAndCheck(
+ "PUT",
+ self.pull_request_url + "/reviews/%s/dismissals" % self.id,
+ input=post_parameters
+ )
+
def _initAttributes(self):
self._id = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
| {"golden_diff": "diff --git a/github/PullRequestReview.py b/github/PullRequestReview.py\n--- a/github/PullRequestReview.py\n+++ b/github/PullRequestReview.py\n@@ -111,6 +111,19 @@\n self._completeIfNotSet(self._submitted_at)\n return self._submitted_at.value\n \n+ def dismiss(self, message):\n+ \"\"\"\n+ :calls: `PUT /repos/:owner/:repo/pulls/:number/reviews/:review_id/dismissals <https://developer.github.com/v3/pulls/reviews/>`_\n+ :rtype: None\n+ \"\"\"\n+ assert isinstance(message, (str, unicode)), message\n+ post_parameters = {'message': message}\n+ headers, data = self._requester.requestJsonAndCheck(\n+ \"PUT\",\n+ self.pull_request_url + \"/reviews/%s/dismissals\" % self.id,\n+ input=post_parameters\n+ )\n+\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n", "issue": "dismiss a PR review?\nAm I reading the docs correctly and understanding that there is no support for [dismissing a PR review](https://developer.github.com/v3/pulls/reviews/#dismiss-a-pull-request-review)?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2017 Aaron Levine <[email protected]> #\n# Copyright 2017 Mike Miller <[email protected]> #\n# Copyright 2018 Darragh Bailey <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport github.GithubObject\n\nimport github.NamedUser\n\n\nclass PullRequestReview(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents PullRequestReviews. 
The reference can be found here https://developer.github.com/v3/pulls/reviews/\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"user\": self._user.value})\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def user(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._user)\n return self._user.value\n\n @property\n def body(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._body)\n return self._body.value\n\n @property\n def commit_id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._commit_id)\n return self._commit_id.value\n\n @property\n def state(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._state)\n return self._state.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def pull_request_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._pull_request_url)\n return self._pull_request_url.value\n\n @property\n def submitted_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._submitted_at)\n return self._submitted_at.value\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n self._body = github.GithubObject.NotSet\n self._commit_id = github.GithubObject.NotSet\n self._state = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._pull_request_url = github.GithubObject.NotSet\n self._submitted_at = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"user\" in attributes: # pragma no branch\n self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"user\"])\n if \"body\" in attributes: # pragma no branch\n self._body = self._makeStringAttribute(attributes[\"body\"])\n if \"commit_id\" in attributes: # pragma no branch\n self._commit_id = self._makeStringAttribute(attributes[\"commit_id\"])\n if \"state\" in attributes: # pragma no branch\n self._state = self._makeStringAttribute(attributes[\"state\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"pull_request_url\" in attributes: # pragma no branch\n self._pull_request_url = self._makeStringAttribute(attributes[\"pull_request_url\"])\n if \"submitted_at\" in attributes: # pragma no branch\n self._submitted_at = self._makeDatetimeAttribute(attributes[\"submitted_at\"])\n", "path": "github/PullRequestReview.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2017 Aaron Levine <[email protected]> #\n# Copyright 2017 Mike Miller <[email protected]> #\n# Copyright 2018 Darragh Bailey <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. 
#\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport github.GithubObject\n\nimport github.NamedUser\n\n\nclass PullRequestReview(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents PullRequestReviews. The reference can be found here https://developer.github.com/v3/pulls/reviews/\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"user\": self._user.value})\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def user(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._user)\n return self._user.value\n\n @property\n def body(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._body)\n return self._body.value\n\n @property\n def commit_id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._commit_id)\n return self._commit_id.value\n\n @property\n def state(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._state)\n return self._state.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def pull_request_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._pull_request_url)\n return self._pull_request_url.value\n\n @property\n def submitted_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._submitted_at)\n return self._submitted_at.value\n\n def dismiss(self, message):\n \"\"\"\n :calls: `PUT /repos/:owner/:repo/pulls/:number/reviews/:review_id/dismissals <https://developer.github.com/v3/pulls/reviews/>`_\n :rtype: None\n \"\"\"\n assert isinstance(message, (str, unicode)), message\n post_parameters = {'message': message}\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.pull_request_url + \"/reviews/%s/dismissals\" % self.id,\n input=post_parameters\n )\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n self._body = github.GithubObject.NotSet\n self._commit_id = github.GithubObject.NotSet\n self._state = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._pull_request_url = github.GithubObject.NotSet\n self._submitted_at = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"user\" in attributes: # pragma no branch\n 
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"user\"])\n if \"body\" in attributes: # pragma no branch\n self._body = self._makeStringAttribute(attributes[\"body\"])\n if \"commit_id\" in attributes: # pragma no branch\n self._commit_id = self._makeStringAttribute(attributes[\"commit_id\"])\n if \"state\" in attributes: # pragma no branch\n self._state = self._makeStringAttribute(attributes[\"state\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"pull_request_url\" in attributes: # pragma no branch\n self._pull_request_url = self._makeStringAttribute(attributes[\"pull_request_url\"])\n if \"submitted_at\" in attributes: # pragma no branch\n self._submitted_at = self._makeDatetimeAttribute(attributes[\"submitted_at\"])\n", "path": "github/PullRequestReview.py"}]} | 1,801 | 235 |
gh_patches_debug_19909 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-5441 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The the feature dim of data.x is zero in Proteins dataset with the pyg version after 2.0.5
### 🐛 Describe the bug
The main reason is in line 136 of tu_dataset.py
it is strange that the value of num_edge_attributes is larger than the feature dimension of self.data.x in proteins, which leads to the resulting dimension of self.data.x is num_nodes*0
### Environment
* PyG version:
* PyTorch version:
* OS:
* Python version:
* CUDA/cuDNN version:
* How you installed PyTorch and PyG (`conda`, `pip`, source):
* Any other relevant information (*e.g.*, version of `torch-scatter`):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/io/tu.py`
Content:
```
1 import glob
2 import os
3 import os.path as osp
4
5 import numpy as np
6 import torch
7 import torch.nn.functional as F
8 from torch_sparse import coalesce
9
10 from torch_geometric.data import Data
11 from torch_geometric.io import read_txt_array
12 from torch_geometric.utils import remove_self_loops
13
14 names = [
15 'A', 'graph_indicator', 'node_labels', 'node_attributes'
16 'edge_labels', 'edge_attributes', 'graph_labels', 'graph_attributes'
17 ]
18
19
20 def read_tu_data(folder, prefix):
21 files = glob.glob(osp.join(folder, f'{prefix}_*.txt'))
22 names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files]
23
24 edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1
25 batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1
26
27 node_attributes = torch.empty((batch.size(0), 0))
28 if 'node_attributes' in names:
29 node_attributes = read_file(folder, prefix, 'node_attributes')
30
31 node_labels = torch.empty((batch.size(0), 0))
32 if 'node_labels' in names:
33 node_labels = read_file(folder, prefix, 'node_labels', torch.long)
34 if node_labels.dim() == 1:
35 node_labels = node_labels.unsqueeze(-1)
36 node_labels = node_labels - node_labels.min(dim=0)[0]
37 node_labels = node_labels.unbind(dim=-1)
38 node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels]
39 node_labels = torch.cat(node_labels, dim=-1).to(torch.float)
40
41 edge_attributes = torch.empty((edge_index.size(1), 0))
42 if 'edge_attributes' in names:
43 edge_attributes = read_file(folder, prefix, 'edge_attributes')
44
45 edge_labels = torch.empty((edge_index.size(1), 0))
46 if 'edge_labels' in names:
47 edge_labels = read_file(folder, prefix, 'edge_labels', torch.long)
48 if edge_labels.dim() == 1:
49 edge_labels = edge_labels.unsqueeze(-1)
50 edge_labels = edge_labels - edge_labels.min(dim=0)[0]
51 edge_labels = edge_labels.unbind(dim=-1)
52 edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels]
53 edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float)
54
55 x = cat([node_attributes, node_labels])
56 edge_attr = cat([edge_attributes, edge_labels])
57
58 y = None
59 if 'graph_attributes' in names: # Regression problem.
60 y = read_file(folder, prefix, 'graph_attributes')
61 elif 'graph_labels' in names: # Classification problem.
62 y = read_file(folder, prefix, 'graph_labels', torch.long)
63 _, y = y.unique(sorted=True, return_inverse=True)
64
65 num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)
66 edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
67 edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,
68 num_nodes)
69
70 data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)
71 data, slices = split(data, batch)
72
73 sizes = {
74 'num_node_attributes': node_attributes.size(-1),
75 'num_node_labels': node_labels.size(-1),
76 'num_edge_attributes': edge_attributes.size(-1),
77 'num_edge_labels': edge_labels.size(-1),
78 }
79
80 return data, slices, sizes
81
82
83 def read_file(folder, prefix, name, dtype=None):
84 path = osp.join(folder, f'{prefix}_{name}.txt')
85 return read_txt_array(path, sep=',', dtype=dtype)
86
87
88 def cat(seq):
89 seq = [item for item in seq if item is not None]
90 seq = [item for item in seq if item.numel() > 0]
91 seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq]
92 return torch.cat(seq, dim=-1) if len(seq) > 0 else None
93
94
95 def split(data, batch):
96 node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
97 node_slice = torch.cat([torch.tensor([0]), node_slice])
98
99 row, _ = data.edge_index
100 edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)
101 edge_slice = torch.cat([torch.tensor([0]), edge_slice])
102
103 # Edge indices should start at zero for every graph.
104 data.edge_index -= node_slice[batch[row]].unsqueeze(0)
105
106 slices = {'edge_index': edge_slice}
107 if data.x is not None:
108 slices['x'] = node_slice
109 else:
110 # Imitate `collate` functionality:
111 data._num_nodes = torch.bincount(batch).tolist()
112 data.num_nodes = batch.numel()
113 if data.edge_attr is not None:
114 slices['edge_attr'] = edge_slice
115 if data.y is not None:
116 if data.y.size(0) == batch.size(0):
117 slices['y'] = node_slice
118 else:
119 slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)
120
121 return data, slices
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py
--- a/torch_geometric/io/tu.py
+++ b/torch_geometric/io/tu.py
@@ -27,6 +27,8 @@
node_attributes = torch.empty((batch.size(0), 0))
if 'node_attributes' in names:
node_attributes = read_file(folder, prefix, 'node_attributes')
+ if node_attributes.dim() == 1:
+ node_attributes = node_attributes.unsqueeze(-1)
node_labels = torch.empty((batch.size(0), 0))
if 'node_labels' in names:
@@ -41,6 +43,8 @@
edge_attributes = torch.empty((edge_index.size(1), 0))
if 'edge_attributes' in names:
edge_attributes = read_file(folder, prefix, 'edge_attributes')
+ if edge_attributes.dim() == 1:
+ edge_attributes = edge_attributes.unsqueeze(-1)
edge_labels = torch.empty((edge_index.size(1), 0))
if 'edge_labels' in names:
| {"golden_diff": "diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py\n--- a/torch_geometric/io/tu.py\n+++ b/torch_geometric/io/tu.py\n@@ -27,6 +27,8 @@\n node_attributes = torch.empty((batch.size(0), 0))\n if 'node_attributes' in names:\n node_attributes = read_file(folder, prefix, 'node_attributes')\n+ if node_attributes.dim() == 1:\n+ node_attributes = node_attributes.unsqueeze(-1)\n \n node_labels = torch.empty((batch.size(0), 0))\n if 'node_labels' in names:\n@@ -41,6 +43,8 @@\n edge_attributes = torch.empty((edge_index.size(1), 0))\n if 'edge_attributes' in names:\n edge_attributes = read_file(folder, prefix, 'edge_attributes')\n+ if edge_attributes.dim() == 1:\n+ edge_attributes = edge_attributes.unsqueeze(-1)\n \n edge_labels = torch.empty((edge_index.size(1), 0))\n if 'edge_labels' in names:\n", "issue": "The the feature dim of data.x is zero in Proteins dataset with the pyg version after 2.0.5\n### \ud83d\udc1b Describe the bug\n\nThe main reason is in line 136 of tu_dataset.py\r\n\r\nit is strange that the value of num_edge_attributes is larger than the feature dimension of self.data.x in proteins, which leads to the resulting dimension of self.data.x is num_nodes*0\r\n\n\n### Environment\n\n* PyG version:\r\n* PyTorch version:\r\n* OS:\r\n* Python version:\r\n* CUDA/cuDNN version:\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source):\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "import glob\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch_sparse import coalesce\n\nfrom torch_geometric.data import Data\nfrom torch_geometric.io import read_txt_array\nfrom torch_geometric.utils import remove_self_loops\n\nnames = [\n 'A', 'graph_indicator', 'node_labels', 'node_attributes'\n 'edge_labels', 'edge_attributes', 'graph_labels', 'graph_attributes'\n]\n\n\ndef read_tu_data(folder, prefix):\n files = glob.glob(osp.join(folder, f'{prefix}_*.txt'))\n names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files]\n\n edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1\n batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1\n\n node_attributes = torch.empty((batch.size(0), 0))\n if 'node_attributes' in names:\n node_attributes = read_file(folder, prefix, 'node_attributes')\n\n node_labels = torch.empty((batch.size(0), 0))\n if 'node_labels' in names:\n node_labels = read_file(folder, prefix, 'node_labels', torch.long)\n if node_labels.dim() == 1:\n node_labels = node_labels.unsqueeze(-1)\n node_labels = node_labels - node_labels.min(dim=0)[0]\n node_labels = node_labels.unbind(dim=-1)\n node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels]\n node_labels = torch.cat(node_labels, dim=-1).to(torch.float)\n\n edge_attributes = torch.empty((edge_index.size(1), 0))\n if 'edge_attributes' in names:\n edge_attributes = read_file(folder, prefix, 'edge_attributes')\n\n edge_labels = torch.empty((edge_index.size(1), 0))\n if 'edge_labels' in names:\n edge_labels = read_file(folder, prefix, 'edge_labels', torch.long)\n if edge_labels.dim() == 1:\n edge_labels = edge_labels.unsqueeze(-1)\n edge_labels = edge_labels - edge_labels.min(dim=0)[0]\n edge_labels = edge_labels.unbind(dim=-1)\n edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels]\n edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float)\n\n x = cat([node_attributes, node_labels])\n edge_attr = cat([edge_attributes, edge_labels])\n\n 
y = None\n if 'graph_attributes' in names: # Regression problem.\n y = read_file(folder, prefix, 'graph_attributes')\n elif 'graph_labels' in names: # Classification problem.\n y = read_file(folder, prefix, 'graph_labels', torch.long)\n _, y = y.unique(sorted=True, return_inverse=True)\n\n num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,\n num_nodes)\n\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n data, slices = split(data, batch)\n\n sizes = {\n 'num_node_attributes': node_attributes.size(-1),\n 'num_node_labels': node_labels.size(-1),\n 'num_edge_attributes': edge_attributes.size(-1),\n 'num_edge_labels': edge_labels.size(-1),\n }\n\n return data, slices, sizes\n\n\ndef read_file(folder, prefix, name, dtype=None):\n path = osp.join(folder, f'{prefix}_{name}.txt')\n return read_txt_array(path, sep=',', dtype=dtype)\n\n\ndef cat(seq):\n seq = [item for item in seq if item is not None]\n seq = [item for item in seq if item.numel() > 0]\n seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq]\n return torch.cat(seq, dim=-1) if len(seq) > 0 else None\n\n\ndef split(data, batch):\n node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)\n node_slice = torch.cat([torch.tensor([0]), node_slice])\n\n row, _ = data.edge_index\n edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)\n edge_slice = torch.cat([torch.tensor([0]), edge_slice])\n\n # Edge indices should start at zero for every graph.\n data.edge_index -= node_slice[batch[row]].unsqueeze(0)\n\n slices = {'edge_index': edge_slice}\n if data.x is not None:\n slices['x'] = node_slice\n else:\n # Imitate `collate` functionality:\n data._num_nodes = torch.bincount(batch).tolist()\n data.num_nodes = batch.numel()\n if data.edge_attr is not None:\n slices['edge_attr'] = edge_slice\n if data.y is not None:\n if data.y.size(0) == batch.size(0):\n slices['y'] = node_slice\n else:\n slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)\n\n return data, slices\n", "path": "torch_geometric/io/tu.py"}], "after_files": [{"content": "import glob\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch_sparse import coalesce\n\nfrom torch_geometric.data import Data\nfrom torch_geometric.io import read_txt_array\nfrom torch_geometric.utils import remove_self_loops\n\nnames = [\n 'A', 'graph_indicator', 'node_labels', 'node_attributes'\n 'edge_labels', 'edge_attributes', 'graph_labels', 'graph_attributes'\n]\n\n\ndef read_tu_data(folder, prefix):\n files = glob.glob(osp.join(folder, f'{prefix}_*.txt'))\n names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files]\n\n edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1\n batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1\n\n node_attributes = torch.empty((batch.size(0), 0))\n if 'node_attributes' in names:\n node_attributes = read_file(folder, prefix, 'node_attributes')\n if node_attributes.dim() == 1:\n node_attributes = node_attributes.unsqueeze(-1)\n\n node_labels = torch.empty((batch.size(0), 0))\n if 'node_labels' in names:\n node_labels = read_file(folder, prefix, 'node_labels', torch.long)\n if node_labels.dim() == 1:\n node_labels = node_labels.unsqueeze(-1)\n node_labels = node_labels - node_labels.min(dim=0)[0]\n node_labels = node_labels.unbind(dim=-1)\n node_labels = 
[F.one_hot(x, num_classes=-1) for x in node_labels]\n node_labels = torch.cat(node_labels, dim=-1).to(torch.float)\n\n edge_attributes = torch.empty((edge_index.size(1), 0))\n if 'edge_attributes' in names:\n edge_attributes = read_file(folder, prefix, 'edge_attributes')\n if edge_attributes.dim() == 1:\n edge_attributes = edge_attributes.unsqueeze(-1)\n\n edge_labels = torch.empty((edge_index.size(1), 0))\n if 'edge_labels' in names:\n edge_labels = read_file(folder, prefix, 'edge_labels', torch.long)\n if edge_labels.dim() == 1:\n edge_labels = edge_labels.unsqueeze(-1)\n edge_labels = edge_labels - edge_labels.min(dim=0)[0]\n edge_labels = edge_labels.unbind(dim=-1)\n edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels]\n edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float)\n\n x = cat([node_attributes, node_labels])\n edge_attr = cat([edge_attributes, edge_labels])\n\n y = None\n if 'graph_attributes' in names: # Regression problem.\n y = read_file(folder, prefix, 'graph_attributes')\n elif 'graph_labels' in names: # Classification problem.\n y = read_file(folder, prefix, 'graph_labels', torch.long)\n _, y = y.unique(sorted=True, return_inverse=True)\n\n num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,\n num_nodes)\n\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n data, slices = split(data, batch)\n\n sizes = {\n 'num_node_attributes': node_attributes.size(-1),\n 'num_node_labels': node_labels.size(-1),\n 'num_edge_attributes': edge_attributes.size(-1),\n 'num_edge_labels': edge_labels.size(-1),\n }\n\n return data, slices, sizes\n\n\ndef read_file(folder, prefix, name, dtype=None):\n path = osp.join(folder, f'{prefix}_{name}.txt')\n return read_txt_array(path, sep=',', dtype=dtype)\n\n\ndef cat(seq):\n seq = [item for item in seq if item is not None]\n seq = [item for item in seq if item.numel() > 0]\n seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq]\n return torch.cat(seq, dim=-1) if len(seq) > 0 else None\n\n\ndef split(data, batch):\n node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)\n node_slice = torch.cat([torch.tensor([0]), node_slice])\n\n row, _ = data.edge_index\n edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)\n edge_slice = torch.cat([torch.tensor([0]), edge_slice])\n\n # Edge indices should start at zero for every graph.\n data.edge_index -= node_slice[batch[row]].unsqueeze(0)\n\n slices = {'edge_index': edge_slice}\n if data.x is not None:\n slices['x'] = node_slice\n else:\n # Imitate `collate` functionality:\n data._num_nodes = torch.bincount(batch).tolist()\n data.num_nodes = batch.numel()\n if data.edge_attr is not None:\n slices['edge_attr'] = edge_slice\n if data.y is not None:\n if data.y.size(0) == batch.size(0):\n slices['y'] = node_slice\n else:\n slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)\n\n return data, slices\n", "path": "torch_geometric/io/tu.py"}]} | 1,849 | 240 |
gh_patches_debug_9496 | rasdani/github-patches | git_diff | rotki__rotki-2260 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Staked Cream price appears incorrectly
In version 1.13.0 of Rotki the staked Cream price and logo appear incorrectly. The CRM's price and logo show up instead of CREAM's.
The previous version of Rotki was showing the price correctly but the logo was still incorrect.
I think cryptocompare is used as price oracle for CREAM.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/icons.py`
Content:
```
1 import itertools
2 import logging
3 from pathlib import Path
4 from typing import Optional, Set
5
6 import gevent
7 import requests
8 from typing_extensions import Literal
9
10 from rotkehlchen.assets.asset import Asset
11 from rotkehlchen.assets.resolver import AssetResolver, asset_type_mapping
12 from rotkehlchen.errors import RemoteError
13 from rotkehlchen.externalapis.coingecko import Coingecko, DELISTED_ASSETS
14 from rotkehlchen.typing import AssetType
15 from rotkehlchen.utils.hashing import file_md5
16 from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE
17
18 log = logging.getLogger(__name__)
19
20
21 class IconManager():
22 """
23 Manages the icons for all the assets of the application
24
25 The get_icon() and the periodic task of query_uncached_icons_batch() may at
26 a point query the same icon but that's fine and not worth of locking mechanism as
27 it should be rather rare and worst case scenario once in a blue moon we waste
28 an API call. In the end the right file would be written on disk.
29 """
30
31 def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:
32 self.icons_dir = data_dir / 'icons'
33 self.coingecko = coingecko
34 self.icons_dir.mkdir(parents=True, exist_ok=True)
35 self.failed_assets: Set[Asset] = set()
36
37 def iconfile_path(self, asset: Asset, size: Literal['thumb', 'small', 'large']) -> Path:
38 return self.icons_dir / f'{asset.identifier}_{size}.png'
39
40 def iconfile_md5(
41 self,
42 asset: Asset,
43 size: Literal['thumb', 'small', 'large'],
44 ) -> Optional[str]:
45 path = self.iconfile_path(asset, size)
46 if not path.is_file():
47 return None
48
49 return file_md5(path)
50
51 def _query_coingecko_for_icon(self, asset: Asset) -> bool:
52 """Queries coingecko for icons of an asset
53
54 If query was okay it returns True, else False
55 """
56 # Do not bother querying if asset is delisted. Nothing is returned.
57 # we only keep delisted asset coingecko mappings since historical prices
58 # can still be queried.
59 if asset.identifier in DELISTED_ASSETS:
60 self.failed_assets.add(asset)
61 return False
62
63 try:
64 data = self.coingecko.asset_data(asset)
65 except RemoteError as e:
66 log.warning(
67 f'Problem querying coingecko for asset data of {asset.identifier}: {str(e)}',
68 )
69 # If a query fails (99% of fails will be 404s) don't repeat them
70 self.failed_assets.add(asset)
71 return False
72
73 for size in ('thumb', 'small', 'large'):
74 url = getattr(data.images, size)
75 try:
76 response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)
77 except requests.exceptions.RequestException:
78 # Any problem getting the image skip it: https://github.com/rotki/rotki/issues/1370
79 continue
80
81 with open(self.iconfile_path(asset, size), 'wb') as f: # type: ignore
82 f.write(response.content)
83
84 return True
85
86 def get_icon(
87 self,
88 asset: Asset, given_size: Literal['thumb', 'small', 'large'],
89 ) -> Optional[bytes]:
90 """Returns the byte data of the requested icon
91
92 If the icon can't be found it returns None.
93
94 If the icon is found cached locally it's returned directly.
95
96 If not, all icons of the asset are queried from coingecko and cached
97 locally before the requested data are returned.
98 """
99 if not asset.has_coingecko():
100 return None
101
102 needed_path = self.iconfile_path(asset, given_size)
103 if needed_path.is_file():
104 with open(needed_path, 'rb') as f:
105 image_data = f.read()
106 return image_data
107
108 # else query coingecko for the icons and cache all of them
109 if self._query_coingecko_for_icon(asset) is False:
110 return None
111
112 if not needed_path.is_file():
113 return None
114
115 with open(needed_path, 'rb') as f:
116 image_data = f.read()
117 return image_data
118
119 def query_uncached_icons_batch(self, batch_size: int) -> bool:
120 """Queries a batch of uncached icons for assets
121
122 Returns true if there is more icons left to cache after this batch.
123 """
124 coingecko_integrated_assets = []
125
126 for identifier, asset_data in AssetResolver().assets.items():
127 asset_type = asset_type_mapping[asset_data['type']]
128 if asset_type != AssetType.FIAT and asset_data['coingecko'] != '':
129 coingecko_integrated_assets.append(identifier)
130
131 cached_assets = [
132 str(x.name)[:-10] for x in self.icons_dir.glob('*_thumb.png') if x.is_file()
133 ]
134 uncached_assets = (
135 set(coingecko_integrated_assets) - set(cached_assets) - self.failed_assets
136 )
137 log.info(
138 f'Periodic task to query coingecko for {batch_size} uncached asset icons. '
139 f'Uncached assets: {len(uncached_assets)}. Cached assets: {len(cached_assets)}',
140 )
141 for asset_name in itertools.islice(uncached_assets, batch_size):
142 self._query_coingecko_for_icon(Asset(asset_name))
143
144 return len(uncached_assets) > batch_size
145
146 def periodically_query_icons_until_all_cached(
147 self,
148 batch_size: int,
149 sleep_time_secs: float,
150 ) -> None:
151 """Periodically query all uncached icons until we have icons cached for all
152 of the known assets that have coingecko integration"""
153 if batch_size == 0:
154 return
155
156 while True:
157 carry_on = self.query_uncached_icons_batch(batch_size=batch_size)
158 if not carry_on:
159 break
160 gevent.sleep(sleep_time_secs)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rotkehlchen/icons.py b/rotkehlchen/icons.py
--- a/rotkehlchen/icons.py
+++ b/rotkehlchen/icons.py
@@ -26,7 +26,7 @@
a point query the same icon but that's fine and not worth of locking mechanism as
it should be rather rare and worst case scenario once in a blue moon we waste
an API call. In the end the right file would be written on disk.
-"""
+ """
def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:
self.icons_dir = data_dir / 'icons'
| {"golden_diff": "diff --git a/rotkehlchen/icons.py b/rotkehlchen/icons.py\n--- a/rotkehlchen/icons.py\n+++ b/rotkehlchen/icons.py\n@@ -26,7 +26,7 @@\n a point query the same icon but that's fine and not worth of locking mechanism as\n it should be rather rare and worst case scenario once in a blue moon we waste\n an API call. In the end the right file would be written on disk.\n-\"\"\"\n+ \"\"\"\n \n def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:\n self.icons_dir = data_dir / 'icons'\n", "issue": "Staked Cream price appears incorrectly\nIn version 1.13.0 of Rotki the staked Cream price and logo appear incorrectly. The CRM's price and logo show up instead of CREAM's.\r\n\r\nThe previous version of Rotki was showing the price correctly but the logo was still incorrect.\r\n\r\nI think cryptocompare is used as price oracle for CREAM.\n", "before_files": [{"content": "import itertools\nimport logging\nfrom pathlib import Path\nfrom typing import Optional, Set\n\nimport gevent\nimport requests\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.assets.resolver import AssetResolver, asset_type_mapping\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.externalapis.coingecko import Coingecko, DELISTED_ASSETS\nfrom rotkehlchen.typing import AssetType\nfrom rotkehlchen.utils.hashing import file_md5\nfrom rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE\n\nlog = logging.getLogger(__name__)\n\n\nclass IconManager():\n \"\"\"\n Manages the icons for all the assets of the application\n\n The get_icon() and the periodic task of query_uncached_icons_batch() may at\n a point query the same icon but that's fine and not worth of locking mechanism as\n it should be rather rare and worst case scenario once in a blue moon we waste\n an API call. In the end the right file would be written on disk.\n\"\"\"\n\n def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:\n self.icons_dir = data_dir / 'icons'\n self.coingecko = coingecko\n self.icons_dir.mkdir(parents=True, exist_ok=True)\n self.failed_assets: Set[Asset] = set()\n\n def iconfile_path(self, asset: Asset, size: Literal['thumb', 'small', 'large']) -> Path:\n return self.icons_dir / f'{asset.identifier}_{size}.png'\n\n def iconfile_md5(\n self,\n asset: Asset,\n size: Literal['thumb', 'small', 'large'],\n ) -> Optional[str]:\n path = self.iconfile_path(asset, size)\n if not path.is_file():\n return None\n\n return file_md5(path)\n\n def _query_coingecko_for_icon(self, asset: Asset) -> bool:\n \"\"\"Queries coingecko for icons of an asset\n\n If query was okay it returns True, else False\n \"\"\"\n # Do not bother querying if asset is delisted. 
Nothing is returned.\n # we only keep delisted asset coingecko mappings since historical prices\n # can still be queried.\n if asset.identifier in DELISTED_ASSETS:\n self.failed_assets.add(asset)\n return False\n\n try:\n data = self.coingecko.asset_data(asset)\n except RemoteError as e:\n log.warning(\n f'Problem querying coingecko for asset data of {asset.identifier}: {str(e)}',\n )\n # If a query fails (99% of fails will be 404s) don't repeat them\n self.failed_assets.add(asset)\n return False\n\n for size in ('thumb', 'small', 'large'):\n url = getattr(data.images, size)\n try:\n response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)\n except requests.exceptions.RequestException:\n # Any problem getting the image skip it: https://github.com/rotki/rotki/issues/1370\n continue\n\n with open(self.iconfile_path(asset, size), 'wb') as f: # type: ignore\n f.write(response.content)\n\n return True\n\n def get_icon(\n self,\n asset: Asset, given_size: Literal['thumb', 'small', 'large'],\n ) -> Optional[bytes]:\n \"\"\"Returns the byte data of the requested icon\n\n If the icon can't be found it returns None.\n\n If the icon is found cached locally it's returned directly.\n\n If not, all icons of the asset are queried from coingecko and cached\n locally before the requested data are returned.\n \"\"\"\n if not asset.has_coingecko():\n return None\n\n needed_path = self.iconfile_path(asset, given_size)\n if needed_path.is_file():\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n # else query coingecko for the icons and cache all of them\n if self._query_coingecko_for_icon(asset) is False:\n return None\n\n if not needed_path.is_file():\n return None\n\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n def query_uncached_icons_batch(self, batch_size: int) -> bool:\n \"\"\"Queries a batch of uncached icons for assets\n\n Returns true if there is more icons left to cache after this batch.\n \"\"\"\n coingecko_integrated_assets = []\n\n for identifier, asset_data in AssetResolver().assets.items():\n asset_type = asset_type_mapping[asset_data['type']]\n if asset_type != AssetType.FIAT and asset_data['coingecko'] != '':\n coingecko_integrated_assets.append(identifier)\n\n cached_assets = [\n str(x.name)[:-10] for x in self.icons_dir.glob('*_thumb.png') if x.is_file()\n ]\n uncached_assets = (\n set(coingecko_integrated_assets) - set(cached_assets) - self.failed_assets\n )\n log.info(\n f'Periodic task to query coingecko for {batch_size} uncached asset icons. '\n f'Uncached assets: {len(uncached_assets)}. 
Cached assets: {len(cached_assets)}',\n )\n for asset_name in itertools.islice(uncached_assets, batch_size):\n self._query_coingecko_for_icon(Asset(asset_name))\n\n return len(uncached_assets) > batch_size\n\n def periodically_query_icons_until_all_cached(\n self,\n batch_size: int,\n sleep_time_secs: float,\n ) -> None:\n \"\"\"Periodically query all uncached icons until we have icons cached for all\n of the known assets that have coingecko integration\"\"\"\n if batch_size == 0:\n return\n\n while True:\n carry_on = self.query_uncached_icons_batch(batch_size=batch_size)\n if not carry_on:\n break\n gevent.sleep(sleep_time_secs)\n", "path": "rotkehlchen/icons.py"}], "after_files": [{"content": "import itertools\nimport logging\nfrom pathlib import Path\nfrom typing import Optional, Set\n\nimport gevent\nimport requests\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.assets.resolver import AssetResolver, asset_type_mapping\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.externalapis.coingecko import Coingecko, DELISTED_ASSETS\nfrom rotkehlchen.typing import AssetType\nfrom rotkehlchen.utils.hashing import file_md5\nfrom rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE\n\nlog = logging.getLogger(__name__)\n\n\nclass IconManager():\n \"\"\"\n Manages the icons for all the assets of the application\n\n The get_icon() and the periodic task of query_uncached_icons_batch() may at\n a point query the same icon but that's fine and not worth of locking mechanism as\n it should be rather rare and worst case scenario once in a blue moon we waste\n an API call. In the end the right file would be written on disk.\n \"\"\"\n\n def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:\n self.icons_dir = data_dir / 'icons'\n self.coingecko = coingecko\n self.icons_dir.mkdir(parents=True, exist_ok=True)\n self.failed_assets: Set[Asset] = set()\n\n def iconfile_path(self, asset: Asset, size: Literal['thumb', 'small', 'large']) -> Path:\n return self.icons_dir / f'{asset.identifier}_{size}.png'\n\n def iconfile_md5(\n self,\n asset: Asset,\n size: Literal['thumb', 'small', 'large'],\n ) -> Optional[str]:\n path = self.iconfile_path(asset, size)\n if not path.is_file():\n return None\n\n return file_md5(path)\n\n def _query_coingecko_for_icon(self, asset: Asset) -> bool:\n \"\"\"Queries coingecko for icons of an asset\n\n If query was okay it returns True, else False\n \"\"\"\n # Do not bother querying if asset is delisted. 
Nothing is returned.\n # we only keep delisted asset coingecko mappings since historical prices\n # can still be queried.\n if asset.identifier in DELISTED_ASSETS:\n self.failed_assets.add(asset)\n return False\n\n try:\n data = self.coingecko.asset_data(asset)\n except RemoteError as e:\n log.warning(\n f'Problem querying coingecko for asset data of {asset.identifier}: {str(e)}',\n )\n # If a query fails (99% of fails will be 404s) don't repeat them\n self.failed_assets.add(asset)\n return False\n\n for size in ('thumb', 'small', 'large'):\n url = getattr(data.images, size)\n try:\n response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)\n except requests.exceptions.RequestException:\n # Any problem getting the image skip it: https://github.com/rotki/rotki/issues/1370\n continue\n\n with open(self.iconfile_path(asset, size), 'wb') as f: # type: ignore\n f.write(response.content)\n\n return True\n\n def get_icon(\n self,\n asset: Asset, given_size: Literal['thumb', 'small', 'large'],\n ) -> Optional[bytes]:\n \"\"\"Returns the byte data of the requested icon\n\n If the icon can't be found it returns None.\n\n If the icon is found cached locally it's returned directly.\n\n If not, all icons of the asset are queried from coingecko and cached\n locally before the requested data are returned.\n \"\"\"\n if not asset.has_coingecko():\n return None\n\n needed_path = self.iconfile_path(asset, given_size)\n if needed_path.is_file():\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n # else query coingecko for the icons and cache all of them\n if self._query_coingecko_for_icon(asset) is False:\n return None\n\n if not needed_path.is_file():\n return None\n\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n def query_uncached_icons_batch(self, batch_size: int) -> bool:\n \"\"\"Queries a batch of uncached icons for assets\n\n Returns true if there is more icons left to cache after this batch.\n \"\"\"\n coingecko_integrated_assets = []\n\n for identifier, asset_data in AssetResolver().assets.items():\n asset_type = asset_type_mapping[asset_data['type']]\n if asset_type != AssetType.FIAT and asset_data['coingecko'] != '':\n coingecko_integrated_assets.append(identifier)\n\n cached_assets = [\n str(x.name)[:-10] for x in self.icons_dir.glob('*_thumb.png') if x.is_file()\n ]\n uncached_assets = (\n set(coingecko_integrated_assets) - set(cached_assets) - self.failed_assets\n )\n log.info(\n f'Periodic task to query coingecko for {batch_size} uncached asset icons. '\n f'Uncached assets: {len(uncached_assets)}. Cached assets: {len(cached_assets)}',\n )\n for asset_name in itertools.islice(uncached_assets, batch_size):\n self._query_coingecko_for_icon(Asset(asset_name))\n\n return len(uncached_assets) > batch_size\n\n def periodically_query_icons_until_all_cached(\n self,\n batch_size: int,\n sleep_time_secs: float,\n ) -> None:\n \"\"\"Periodically query all uncached icons until we have icons cached for all\n of the known assets that have coingecko integration\"\"\"\n if batch_size == 0:\n return\n\n while True:\n carry_on = self.query_uncached_icons_batch(batch_size=batch_size)\n if not carry_on:\n break\n gevent.sleep(sleep_time_secs)\n", "path": "rotkehlchen/icons.py"}]} | 2,038 | 146 |
gh_patches_debug_30301 | rasdani/github-patches | git_diff | napari__napari-4445 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
npe2 plugins need to be added to `napari --info`
## 🐛 Bug
currently, `napari --info` doesn't include npe2 plugins
--- END ISSUE ---
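For orientation, a minimal sketch of how npe2 plugins can be enumerated alongside the classic plugin manager (it assumes the `npe2` package is importable and uses its `PluginManager.instance()` / `iter_manifests()` API):

```python
from npe2 import PluginManager

# Collect "name: version" entries for every installed npe2 plugin,
# skipping napari's own builtin manifests.
pm = PluginManager.instance()
npe2_plugins = {
    manifest.name: manifest.package_version
    for manifest in pm.iter_manifests()
    if manifest.name not in ("napari", "builtins")
}
for name, version in sorted(npe2_plugins.items()):
    print(f" - {name}: {version}")
```

A fix for `sys_info()` would merge output like this with the entries already produced by `plugin_manager.list_plugin_metadata()`.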
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/utils/info.py`
Content:
```
1 import os
2 import platform
3 import subprocess
4 import sys
5
6 import napari
7
8 OS_RELEASE_PATH = "/etc/os-release"
9
10
11 def _linux_sys_name():
12 """
13 Try to discover linux system name base on /etc/os-release file or lsb_release command output
14 https://www.freedesktop.org/software/systemd/man/os-release.html
15 """
16 if os.path.exists(OS_RELEASE_PATH):
17 with open(OS_RELEASE_PATH) as f_p:
18 data = {}
19 for line in f_p:
20 field, value = line.split("=")
21 data[field.strip()] = value.strip().strip('"')
22 if "PRETTY_NAME" in data:
23 return data["PRETTY_NAME"]
24 if "NAME" in data:
25 if "VERSION" in data:
26 return f'{data["NAME"]} {data["VERSION"]}'
27 if "VERSION_ID" in data:
28 return f'{data["NAME"]} {data["VERSION_ID"]}'
29 return f'{data["NAME"]} (no version)'
30
31 try:
32 res = subprocess.run(
33 ["lsb_release", "-d", "-r"], check=True, capture_output=True
34 )
35 text = res.stdout.decode()
36 data = {}
37 for line in text.split("\n"):
38 key, val = line.split(":")
39 data[key.strip()] = val.strip()
40 version_str = data["Description"]
41 if not version_str.endswith(data["Release"]):
42 version_str += " " + data["Release"]
43 return version_str
44 except subprocess.CalledProcessError:
45 pass
46 return ""
47
48
49 def _sys_name():
50 """
51 Discover MacOS or Linux Human readable information. For Linux provide information about distribution.
52 """
53 try:
54 if sys.platform == "linux":
55 return _linux_sys_name()
56 if sys.platform == "darwin":
57 try:
58 res = subprocess.run(
59 ["sw_vers", "-productVersion"],
60 check=True,
61 capture_output=True,
62 )
63 return f"MacOS {res.stdout.decode().strip()}"
64 except subprocess.CalledProcessError:
65 pass
66 except Exception:
67 pass
68 return ""
69
70
71 def sys_info(as_html=False):
72 """Gathers relevant module versions for troubleshooting purposes.
73
74 Parameters
75 ----------
76 as_html : bool
77 if True, info will be returned as HTML, suitable for a QTextEdit widget
78 """
79 from napari.plugins import plugin_manager
80
81 sys_version = sys.version.replace('\n', ' ')
82 text = (
83 f"<b>napari</b>: {napari.__version__}<br>"
84 f"<b>Platform</b>: {platform.platform()}<br>"
85 )
86
87 __sys_name = _sys_name()
88 if __sys_name:
89 text += f"<b>System</b>: {__sys_name}<br>"
90
91 text += f"<b>Python</b>: {sys_version}<br>"
92
93 try:
94 from qtpy import API_NAME, PYQT_VERSION, PYSIDE_VERSION, QtCore
95
96 if API_NAME == 'PySide2':
97 API_VERSION = PYSIDE_VERSION
98 elif API_NAME == 'PyQt5':
99 API_VERSION = PYQT_VERSION
100 else:
101 API_VERSION = ''
102
103 text += (
104 f"<b>Qt</b>: {QtCore.__version__}<br>"
105 f"<b>{API_NAME}</b>: {API_VERSION}<br>"
106 )
107
108 except Exception as e:
109 text += f"<b>Qt</b>: Import failed ({e})<br>"
110
111 modules = (
112 ('numpy', 'NumPy'),
113 ('scipy', 'SciPy'),
114 ('dask', 'Dask'),
115 ('vispy', 'VisPy'),
116 )
117
118 loaded = {}
119 for module, name in modules:
120 try:
121 loaded[module] = __import__(module)
122 text += f"<b>{name}</b>: {loaded[module].__version__}<br>"
123 except Exception as e:
124 text += f"<b>{name}</b>: Import failed ({e})<br>"
125
126 text += "<br><b>OpenGL:</b><br>"
127
128 if loaded.get('vispy', False):
129 sys_info_text = (
130 "<br>".join(
131 [
132 loaded['vispy'].sys_info().split("\n")[index]
133 for index in [-4, -3]
134 ]
135 )
136 .replace("'", "")
137 .replace("<br>", "<br> - ")
138 )
139 text += f' - {sys_info_text}<br>'
140 else:
141 text += " - failed to load vispy"
142
143 text += "<br><b>Screens:</b><br>"
144
145 try:
146 from qtpy.QtGui import QGuiApplication
147
148 screen_list = QGuiApplication.screens()
149 for i, screen in enumerate(screen_list, start=1):
150 text += f" - screen {i}: resolution {screen.geometry().width()}x{screen.geometry().height()}, scale {screen.devicePixelRatio()}<br>"
151 except Exception as e:
152 text += f" - failed to load screen information {e}"
153
154 plugin_manager.discover()
155 plugin_strings = []
156 for meta in plugin_manager.list_plugin_metadata():
157 plugin_name = meta.get('plugin_name')
158 if plugin_name == 'builtins':
159 continue
160 version = meta.get('version')
161 version_string = f": {version}" if version else ""
162 plugin_strings.append(f" - {plugin_name}{version_string}")
163 text += '<br><b>Plugins</b>:'
164 text += (
165 ("<br>" + "<br>".join(sorted(plugin_strings)))
166 if plugin_strings
167 else ' None'
168 )
169
170 if not as_html:
171 text = (
172 text.replace("<br>", "\n").replace("<b>", "").replace("</b>", "")
173 )
174 return text
175
176
177 citation_text = (
178 'napari contributors (2019). napari: a '
179 'multi-dimensional image viewer for python. '
180 'doi:10.5281/zenodo.3555620'
181 )
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/utils/info.py b/napari/utils/info.py
--- a/napari/utils/info.py
+++ b/napari/utils/info.py
@@ -76,6 +76,8 @@
as_html : bool
if True, info will be returned as HTML, suitable for a QTextEdit widget
"""
+ from npe2 import PluginManager as Npe2PluginManager
+
from napari.plugins import plugin_manager
sys_version = sys.version.replace('\n', ' ')
@@ -152,17 +154,27 @@
text += f" - failed to load screen information {e}"
plugin_manager.discover()
- plugin_strings = []
+ plugin_strings = {}
for meta in plugin_manager.list_plugin_metadata():
plugin_name = meta.get('plugin_name')
if plugin_name == 'builtins':
continue
version = meta.get('version')
version_string = f": {version}" if version else ""
- plugin_strings.append(f" - {plugin_name}{version_string}")
+ plugin_strings[plugin_name] = f" - {plugin_name}{version_string}"
+
+ npe2_plugin_manager = Npe2PluginManager.instance()
+ for manifest in npe2_plugin_manager.iter_manifests():
+ plugin_name = manifest.name
+ if plugin_name in ("napari", "builtins"):
+ continue
+ version = manifest.package_version
+ version_string = f": {version}" if version else ""
+ plugin_strings[plugin_name] = f" - {plugin_name}{version_string}"
+
text += '<br><b>Plugins</b>:'
text += (
- ("<br>" + "<br>".join(sorted(plugin_strings)))
+ ("<br>" + "<br>".join(sorted(plugin_strings.values())))
if plugin_strings
else ' None'
)
| {"golden_diff": "diff --git a/napari/utils/info.py b/napari/utils/info.py\n--- a/napari/utils/info.py\n+++ b/napari/utils/info.py\n@@ -76,6 +76,8 @@\n as_html : bool\n if True, info will be returned as HTML, suitable for a QTextEdit widget\n \"\"\"\n+ from npe2 import PluginManager as Npe2PluginManager\n+\n from napari.plugins import plugin_manager\n \n sys_version = sys.version.replace('\\n', ' ')\n@@ -152,17 +154,27 @@\n text += f\" - failed to load screen information {e}\"\n \n plugin_manager.discover()\n- plugin_strings = []\n+ plugin_strings = {}\n for meta in plugin_manager.list_plugin_metadata():\n plugin_name = meta.get('plugin_name')\n if plugin_name == 'builtins':\n continue\n version = meta.get('version')\n version_string = f\": {version}\" if version else \"\"\n- plugin_strings.append(f\" - {plugin_name}{version_string}\")\n+ plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n+\n+ npe2_plugin_manager = Npe2PluginManager.instance()\n+ for manifest in npe2_plugin_manager.iter_manifests():\n+ plugin_name = manifest.name\n+ if plugin_name in (\"napari\", \"builtins\"):\n+ continue\n+ version = manifest.package_version\n+ version_string = f\": {version}\" if version else \"\"\n+ plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n+\n text += '<br><b>Plugins</b>:'\n text += (\n- (\"<br>\" + \"<br>\".join(sorted(plugin_strings)))\n+ (\"<br>\" + \"<br>\".join(sorted(plugin_strings.values())))\n if plugin_strings\n else ' None'\n )\n", "issue": "npe2 plugins need to be added to `napari --info` \n## \ud83d\udc1b Bug\r\ncurrently, `napari --info` doesn't include npe2 plugins\n", "before_files": [{"content": "import os\nimport platform\nimport subprocess\nimport sys\n\nimport napari\n\nOS_RELEASE_PATH = \"/etc/os-release\"\n\n\ndef _linux_sys_name():\n \"\"\"\n Try to discover linux system name base on /etc/os-release file or lsb_release command output\n https://www.freedesktop.org/software/systemd/man/os-release.html\n \"\"\"\n if os.path.exists(OS_RELEASE_PATH):\n with open(OS_RELEASE_PATH) as f_p:\n data = {}\n for line in f_p:\n field, value = line.split(\"=\")\n data[field.strip()] = value.strip().strip('\"')\n if \"PRETTY_NAME\" in data:\n return data[\"PRETTY_NAME\"]\n if \"NAME\" in data:\n if \"VERSION\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION\"]}'\n if \"VERSION_ID\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION_ID\"]}'\n return f'{data[\"NAME\"]} (no version)'\n\n try:\n res = subprocess.run(\n [\"lsb_release\", \"-d\", \"-r\"], check=True, capture_output=True\n )\n text = res.stdout.decode()\n data = {}\n for line in text.split(\"\\n\"):\n key, val = line.split(\":\")\n data[key.strip()] = val.strip()\n version_str = data[\"Description\"]\n if not version_str.endswith(data[\"Release\"]):\n version_str += \" \" + data[\"Release\"]\n return version_str\n except subprocess.CalledProcessError:\n pass\n return \"\"\n\n\ndef _sys_name():\n \"\"\"\n Discover MacOS or Linux Human readable information. 
For Linux provide information about distribution.\n \"\"\"\n try:\n if sys.platform == \"linux\":\n return _linux_sys_name()\n if sys.platform == \"darwin\":\n try:\n res = subprocess.run(\n [\"sw_vers\", \"-productVersion\"],\n check=True,\n capture_output=True,\n )\n return f\"MacOS {res.stdout.decode().strip()}\"\n except subprocess.CalledProcessError:\n pass\n except Exception:\n pass\n return \"\"\n\n\ndef sys_info(as_html=False):\n \"\"\"Gathers relevant module versions for troubleshooting purposes.\n\n Parameters\n ----------\n as_html : bool\n if True, info will be returned as HTML, suitable for a QTextEdit widget\n \"\"\"\n from napari.plugins import plugin_manager\n\n sys_version = sys.version.replace('\\n', ' ')\n text = (\n f\"<b>napari</b>: {napari.__version__}<br>\"\n f\"<b>Platform</b>: {platform.platform()}<br>\"\n )\n\n __sys_name = _sys_name()\n if __sys_name:\n text += f\"<b>System</b>: {__sys_name}<br>\"\n\n text += f\"<b>Python</b>: {sys_version}<br>\"\n\n try:\n from qtpy import API_NAME, PYQT_VERSION, PYSIDE_VERSION, QtCore\n\n if API_NAME == 'PySide2':\n API_VERSION = PYSIDE_VERSION\n elif API_NAME == 'PyQt5':\n API_VERSION = PYQT_VERSION\n else:\n API_VERSION = ''\n\n text += (\n f\"<b>Qt</b>: {QtCore.__version__}<br>\"\n f\"<b>{API_NAME}</b>: {API_VERSION}<br>\"\n )\n\n except Exception as e:\n text += f\"<b>Qt</b>: Import failed ({e})<br>\"\n\n modules = (\n ('numpy', 'NumPy'),\n ('scipy', 'SciPy'),\n ('dask', 'Dask'),\n ('vispy', 'VisPy'),\n )\n\n loaded = {}\n for module, name in modules:\n try:\n loaded[module] = __import__(module)\n text += f\"<b>{name}</b>: {loaded[module].__version__}<br>\"\n except Exception as e:\n text += f\"<b>{name}</b>: Import failed ({e})<br>\"\n\n text += \"<br><b>OpenGL:</b><br>\"\n\n if loaded.get('vispy', False):\n sys_info_text = (\n \"<br>\".join(\n [\n loaded['vispy'].sys_info().split(\"\\n\")[index]\n for index in [-4, -3]\n ]\n )\n .replace(\"'\", \"\")\n .replace(\"<br>\", \"<br> - \")\n )\n text += f' - {sys_info_text}<br>'\n else:\n text += \" - failed to load vispy\"\n\n text += \"<br><b>Screens:</b><br>\"\n\n try:\n from qtpy.QtGui import QGuiApplication\n\n screen_list = QGuiApplication.screens()\n for i, screen in enumerate(screen_list, start=1):\n text += f\" - screen {i}: resolution {screen.geometry().width()}x{screen.geometry().height()}, scale {screen.devicePixelRatio()}<br>\"\n except Exception as e:\n text += f\" - failed to load screen information {e}\"\n\n plugin_manager.discover()\n plugin_strings = []\n for meta in plugin_manager.list_plugin_metadata():\n plugin_name = meta.get('plugin_name')\n if plugin_name == 'builtins':\n continue\n version = meta.get('version')\n version_string = f\": {version}\" if version else \"\"\n plugin_strings.append(f\" - {plugin_name}{version_string}\")\n text += '<br><b>Plugins</b>:'\n text += (\n (\"<br>\" + \"<br>\".join(sorted(plugin_strings)))\n if plugin_strings\n else ' None'\n )\n\n if not as_html:\n text = (\n text.replace(\"<br>\", \"\\n\").replace(\"<b>\", \"\").replace(\"</b>\", \"\")\n )\n return text\n\n\ncitation_text = (\n 'napari contributors (2019). napari: a '\n 'multi-dimensional image viewer for python. 
'\n 'doi:10.5281/zenodo.3555620'\n)\n", "path": "napari/utils/info.py"}], "after_files": [{"content": "import os\nimport platform\nimport subprocess\nimport sys\n\nimport napari\n\nOS_RELEASE_PATH = \"/etc/os-release\"\n\n\ndef _linux_sys_name():\n \"\"\"\n Try to discover linux system name base on /etc/os-release file or lsb_release command output\n https://www.freedesktop.org/software/systemd/man/os-release.html\n \"\"\"\n if os.path.exists(OS_RELEASE_PATH):\n with open(OS_RELEASE_PATH) as f_p:\n data = {}\n for line in f_p:\n field, value = line.split(\"=\")\n data[field.strip()] = value.strip().strip('\"')\n if \"PRETTY_NAME\" in data:\n return data[\"PRETTY_NAME\"]\n if \"NAME\" in data:\n if \"VERSION\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION\"]}'\n if \"VERSION_ID\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION_ID\"]}'\n return f'{data[\"NAME\"]} (no version)'\n\n try:\n res = subprocess.run(\n [\"lsb_release\", \"-d\", \"-r\"], check=True, capture_output=True\n )\n text = res.stdout.decode()\n data = {}\n for line in text.split(\"\\n\"):\n key, val = line.split(\":\")\n data[key.strip()] = val.strip()\n version_str = data[\"Description\"]\n if not version_str.endswith(data[\"Release\"]):\n version_str += \" \" + data[\"Release\"]\n return version_str\n except subprocess.CalledProcessError:\n pass\n return \"\"\n\n\ndef _sys_name():\n \"\"\"\n Discover MacOS or Linux Human readable information. For Linux provide information about distribution.\n \"\"\"\n try:\n if sys.platform == \"linux\":\n return _linux_sys_name()\n if sys.platform == \"darwin\":\n try:\n res = subprocess.run(\n [\"sw_vers\", \"-productVersion\"],\n check=True,\n capture_output=True,\n )\n return f\"MacOS {res.stdout.decode().strip()}\"\n except subprocess.CalledProcessError:\n pass\n except Exception:\n pass\n return \"\"\n\n\ndef sys_info(as_html=False):\n \"\"\"Gathers relevant module versions for troubleshooting purposes.\n\n Parameters\n ----------\n as_html : bool\n if True, info will be returned as HTML, suitable for a QTextEdit widget\n \"\"\"\n from npe2 import PluginManager as Npe2PluginManager\n\n from napari.plugins import plugin_manager\n\n sys_version = sys.version.replace('\\n', ' ')\n text = (\n f\"<b>napari</b>: {napari.__version__}<br>\"\n f\"<b>Platform</b>: {platform.platform()}<br>\"\n )\n\n __sys_name = _sys_name()\n if __sys_name:\n text += f\"<b>System</b>: {__sys_name}<br>\"\n\n text += f\"<b>Python</b>: {sys_version}<br>\"\n\n try:\n from qtpy import API_NAME, PYQT_VERSION, PYSIDE_VERSION, QtCore\n\n if API_NAME == 'PySide2':\n API_VERSION = PYSIDE_VERSION\n elif API_NAME == 'PyQt5':\n API_VERSION = PYQT_VERSION\n else:\n API_VERSION = ''\n\n text += (\n f\"<b>Qt</b>: {QtCore.__version__}<br>\"\n f\"<b>{API_NAME}</b>: {API_VERSION}<br>\"\n )\n\n except Exception as e:\n text += f\"<b>Qt</b>: Import failed ({e})<br>\"\n\n modules = (\n ('numpy', 'NumPy'),\n ('scipy', 'SciPy'),\n ('dask', 'Dask'),\n ('vispy', 'VisPy'),\n )\n\n loaded = {}\n for module, name in modules:\n try:\n loaded[module] = __import__(module)\n text += f\"<b>{name}</b>: {loaded[module].__version__}<br>\"\n except Exception as e:\n text += f\"<b>{name}</b>: Import failed ({e})<br>\"\n\n text += \"<br><b>OpenGL:</b><br>\"\n\n if loaded.get('vispy', False):\n sys_info_text = (\n \"<br>\".join(\n [\n loaded['vispy'].sys_info().split(\"\\n\")[index]\n for index in [-4, -3]\n ]\n )\n .replace(\"'\", \"\")\n .replace(\"<br>\", \"<br> - \")\n )\n text += f' - {sys_info_text}<br>'\n else:\n text += \" - 
failed to load vispy\"\n\n text += \"<br><b>Screens:</b><br>\"\n\n try:\n from qtpy.QtGui import QGuiApplication\n\n screen_list = QGuiApplication.screens()\n for i, screen in enumerate(screen_list, start=1):\n text += f\" - screen {i}: resolution {screen.geometry().width()}x{screen.geometry().height()}, scale {screen.devicePixelRatio()}<br>\"\n except Exception as e:\n text += f\" - failed to load screen information {e}\"\n\n plugin_manager.discover()\n plugin_strings = {}\n for meta in plugin_manager.list_plugin_metadata():\n plugin_name = meta.get('plugin_name')\n if plugin_name == 'builtins':\n continue\n version = meta.get('version')\n version_string = f\": {version}\" if version else \"\"\n plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n\n npe2_plugin_manager = Npe2PluginManager.instance()\n for manifest in npe2_plugin_manager.iter_manifests():\n plugin_name = manifest.name\n if plugin_name in (\"napari\", \"builtins\"):\n continue\n version = manifest.package_version\n version_string = f\": {version}\" if version else \"\"\n plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n\n text += '<br><b>Plugins</b>:'\n text += (\n (\"<br>\" + \"<br>\".join(sorted(plugin_strings.values())))\n if plugin_strings\n else ' None'\n )\n\n if not as_html:\n text = (\n text.replace(\"<br>\", \"\\n\").replace(\"<b>\", \"\").replace(\"</b>\", \"\")\n )\n return text\n\n\ncitation_text = (\n 'napari contributors (2019). napari: a '\n 'multi-dimensional image viewer for python. '\n 'doi:10.5281/zenodo.3555620'\n)\n", "path": "napari/utils/info.py"}]} | 2,044 | 409 |
gh_patches_debug_14393 | rasdani/github-patches | git_diff | falconry__falcon-993 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Default OPTIONS responder does not set Content-Length to "0"
Per RFC 7231:
> A server MUST generate a Content-Length field with a value of "0" if no payload body is to be sent in the response.
--- END ISSUE ---
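As a concrete illustration, here is a minimal sketch of an OPTIONS responder that satisfies this requirement (the `Allow` value is only an assumed example):

```python
import falcon

def on_options(req, resp, **kwargs):
    # No payload body is sent, so Content-Length must be "0" (RFC 7231).
    # RFC 7230 forbids Content-Length on a 204, so respond with 200 instead.
    resp.status = falcon.HTTP_200
    resp.set_header('Allow', 'GET, OPTIONS')
    resp.set_header('Content-Length', '0')
```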
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/responders.py`
Content:
```
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Default responder implementations."""
16
17 from falcon.errors import HTTPBadRequest
18 from falcon.errors import HTTPMethodNotAllowed
19 from falcon.errors import HTTPNotFound
20 from falcon.status_codes import HTTP_204
21
22
23 def path_not_found(req, resp, **kwargs):
24 """Raise 404 HTTPNotFound error"""
25 raise HTTPNotFound()
26
27
28 def bad_request(req, resp, **kwargs):
29 """Raise 400 HTTPBadRequest error"""
30 raise HTTPBadRequest('Bad request', 'Invalid HTTP method')
31
32
33 def create_method_not_allowed(allowed_methods):
34 """Creates a responder for "405 Method Not Allowed"
35
36 Args:
37 allowed_methods: A list of HTTP methods (uppercase) that should be
38 returned in the Allow header.
39
40 """
41 def method_not_allowed(req, resp, **kwargs):
42 """Raise 405 HTTPMethodNotAllowed error"""
43 raise HTTPMethodNotAllowed(allowed_methods)
44
45 return method_not_allowed
46
47
48 def create_default_options(allowed_methods):
49 """Creates a default responder for the OPTIONS method
50
51 Args:
52 allowed_methods: A list of HTTP methods (uppercase) that should be
53 returned in the Allow header.
54
55 """
56 allowed = ', '.join(allowed_methods)
57
58 def on_options(req, resp, **kwargs):
59 resp.status = HTTP_204
60 resp.set_header('Allow', allowed)
61 resp.set_header('Content-Length', '0')
62
63 return on_options
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/responders.py b/falcon/responders.py
--- a/falcon/responders.py
+++ b/falcon/responders.py
@@ -17,7 +17,7 @@
from falcon.errors import HTTPBadRequest
from falcon.errors import HTTPMethodNotAllowed
from falcon.errors import HTTPNotFound
-from falcon.status_codes import HTTP_204
+from falcon.status_codes import HTTP_200
def path_not_found(req, resp, **kwargs):
@@ -56,7 +56,7 @@
allowed = ', '.join(allowed_methods)
def on_options(req, resp, **kwargs):
- resp.status = HTTP_204
+ resp.status = HTTP_200
resp.set_header('Allow', allowed)
resp.set_header('Content-Length', '0')
| {"golden_diff": "diff --git a/falcon/responders.py b/falcon/responders.py\n--- a/falcon/responders.py\n+++ b/falcon/responders.py\n@@ -17,7 +17,7 @@\n from falcon.errors import HTTPBadRequest\n from falcon.errors import HTTPMethodNotAllowed\n from falcon.errors import HTTPNotFound\n-from falcon.status_codes import HTTP_204\n+from falcon.status_codes import HTTP_200\n \n \n def path_not_found(req, resp, **kwargs):\n@@ -56,7 +56,7 @@\n allowed = ', '.join(allowed_methods)\n \n def on_options(req, resp, **kwargs):\n- resp.status = HTTP_204\n+ resp.status = HTTP_200\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n", "issue": "Default OPTIONS responder does not set Content-Length to \"0\"\nPer RFC 7231:\n\n> A server MUST generate a Content-Length field with a value of \"0\" if no payload body is to be sent in the response.\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Default responder implementations.\"\"\"\n\nfrom falcon.errors import HTTPBadRequest\nfrom falcon.errors import HTTPMethodNotAllowed\nfrom falcon.errors import HTTPNotFound\nfrom falcon.status_codes import HTTP_204\n\n\ndef path_not_found(req, resp, **kwargs):\n \"\"\"Raise 404 HTTPNotFound error\"\"\"\n raise HTTPNotFound()\n\n\ndef bad_request(req, resp, **kwargs):\n \"\"\"Raise 400 HTTPBadRequest error\"\"\"\n raise HTTPBadRequest('Bad request', 'Invalid HTTP method')\n\n\ndef create_method_not_allowed(allowed_methods):\n \"\"\"Creates a responder for \"405 Method Not Allowed\"\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n def method_not_allowed(req, resp, **kwargs):\n \"\"\"Raise 405 HTTPMethodNotAllowed error\"\"\"\n raise HTTPMethodNotAllowed(allowed_methods)\n\n return method_not_allowed\n\n\ndef create_default_options(allowed_methods):\n \"\"\"Creates a default responder for the OPTIONS method\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n allowed = ', '.join(allowed_methods)\n\n def on_options(req, resp, **kwargs):\n resp.status = HTTP_204\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n\n return on_options\n", "path": "falcon/responders.py"}], "after_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Default responder 
implementations.\"\"\"\n\nfrom falcon.errors import HTTPBadRequest\nfrom falcon.errors import HTTPMethodNotAllowed\nfrom falcon.errors import HTTPNotFound\nfrom falcon.status_codes import HTTP_200\n\n\ndef path_not_found(req, resp, **kwargs):\n \"\"\"Raise 404 HTTPNotFound error\"\"\"\n raise HTTPNotFound()\n\n\ndef bad_request(req, resp, **kwargs):\n \"\"\"Raise 400 HTTPBadRequest error\"\"\"\n raise HTTPBadRequest('Bad request', 'Invalid HTTP method')\n\n\ndef create_method_not_allowed(allowed_methods):\n \"\"\"Creates a responder for \"405 Method Not Allowed\"\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n def method_not_allowed(req, resp, **kwargs):\n \"\"\"Raise 405 HTTPMethodNotAllowed error\"\"\"\n raise HTTPMethodNotAllowed(allowed_methods)\n\n return method_not_allowed\n\n\ndef create_default_options(allowed_methods):\n \"\"\"Creates a default responder for the OPTIONS method\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n allowed = ', '.join(allowed_methods)\n\n def on_options(req, resp, **kwargs):\n resp.status = HTTP_200\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n\n return on_options\n", "path": "falcon/responders.py"}]} | 874 | 182 |
gh_patches_debug_37383 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-3568 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python3.4 PyQt5 QML application requires environment variables
With the PyQt5 fixes merged (https://github.com/pyinstaller/pyinstaller/pull/3439), I decided to try creating an executable that I have been having trouble with: https://github.com/pyinstaller/pyinstaller/pull/3439#issuecomment-379064155
This is the source code https://github.com/Siecje/qml-testing/tree/PyInstaller
I'm using
- Windows 7 32-bit
- Qt 5.10.1
- PyQt5 compiled from source
- Python 3.4.4
- pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip
When I run the .exe I get an error
```
QWindowsEGLStaticContext::create: Failed to load and resolve libEGL function
Failed to load opengl32sw.dll (The specified module could not be found.)
Failed to load and resolve WGL/OpenGL functions
Failed to create OpenGL context for format QSurfaceFormat(version 2.0, options QFlags<QSurfaceFormat::FormatOption>(), depthBufferSize 24, ...<snip>
This is most likely caused by not having the necessary graphics drivers installed.
Install a driver providing OpenGL 2.0 or higher, or, if this is not possible, make sure the ANGLE OpenGL ES 2.0 emulation libraries (libEGL.dll, libGLESv2.dll and d3dcompiler_*.dll) are available in the application executable's directory or in a location listed in PATH.
```
To run the application I can copy these four .dlls into the `dist\main\` directory.
- libEGL.dll
- libGLESv2.dll
- d3dcompiler_47.dll
- opengl32sw.dll
When I run it I get a Command Prompt window with this output.
```
QWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001
QWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.
```
Instead of copying those .dll files I can add the Qt bin directory to my PATH.
```
set PATH=%PATH%;C:\Qt\5.10.1\msvc2015\bin
call main.exe
QWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001
QWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.
```
When I copy the `dist\main\` directory to another computer (Windows 10), I have to set two environment variables before the application will work.
```
set QT_QPA_PLATFORM_PLUGIN_PATH=%exeDir%\PyQt5\Qt\plugins\platforms
set QML2_IMPORT_PATH=%exeDir%\PyQt5\Qt\qml
```
There are no error messages on the Windows 10 computer with these two environment variables set.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-PyQt5.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2018, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 import os
10
11 from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files
12
13 hiddenimports = ['sip']
14
15 # Collect the ``qt.conf`` file.
16 datas = [x for x in
17 collect_system_data_files(pyqt5_library_info.location['PrefixPath'],
18 'PyQt5')
19 if os.path.basename(x[0]) == 'qt.conf']
20
21 # Include ICU files, if they exist. See the "Deployment approach" section in
22 # ``PyInstaller/utils/hooks/qt.py``.
23 [(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),
24 os.path.join('PyQt5', 'Qt', 'bin', dll))
25 for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]
26
27 # TODO: Include software rendering for OpenGL. See the "Deployment approach". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.
28 ##binaries = []
29 ##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):
30 ## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)
31 ## # Only add files if they exist.
32 ## if glob(dll_path):
33 ## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-PyQt5.py b/PyInstaller/hooks/hook-PyQt5.py
--- a/PyInstaller/hooks/hook-PyQt5.py
+++ b/PyInstaller/hooks/hook-PyQt5.py
@@ -6,6 +6,7 @@
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+import glob
import os
from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files
@@ -18,16 +19,42 @@
'PyQt5')
if os.path.basename(x[0]) == 'qt.conf']
-# Include ICU files, if they exist. See the "Deployment approach" section in
-# ``PyInstaller/utils/hooks/qt.py``.
-[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),
- os.path.join('PyQt5', 'Qt', 'bin', dll))
- for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]
-
-# TODO: Include software rendering for OpenGL. See the "Deployment approach". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.
-##binaries = []
-##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):
-## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)
-## # Only add files if they exist.
-## if glob(dll_path):
-## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]
+
+def find_all_or_none(globs_to_include, num_files):
+ """
+ globs_to_include is a list of file name globs
+ If the number of found files does not match num_files
+ then no files will be included.
+ """
+ # TODO: This function is required because CI is failing to include libEGL
+ # The error in AppVeyor is:
+ # [2312] LOADER: Running pyi_lib_PyQt5-uic.py
+ # Failed to load libEGL (Access is denied.)
+ # More info: https://github.com/pyinstaller/pyinstaller/pull/3568
+ # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and
+ # libGLESv2.dll will not be included for PyQt5 builds during CI.
+ to_include = []
+ for dll in globs_to_include:
+ dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'],
+ dll)
+ dll_file_paths = glob.glob(dll_path)
+ for dll_file_path in dll_file_paths:
+ file_name = os.path.basename(dll_file_path)
+ dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name)
+ to_include.append((dll_file_path, dst_dll_path))
+ if len(to_include) == num_files:
+ return to_include
+ return []
+
+
+binaries = []
+angle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll']
+binaries += find_all_or_none(angle_files, 3)
+
+opengl_software_renderer = ['opengl32sw.dll']
+binaries += find_all_or_none(opengl_software_renderer, 1)
+
+# Include ICU files, if they exist.
+# See the "Deployment approach" section in ``PyInstaller/utils/hooks/qt.py``.
+icu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']
+binaries += find_all_or_none(icu_files, 3)
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-PyQt5.py b/PyInstaller/hooks/hook-PyQt5.py\n--- a/PyInstaller/hooks/hook-PyQt5.py\n+++ b/PyInstaller/hooks/hook-PyQt5.py\n@@ -6,6 +6,7 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n+import glob\n import os\n \n from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n@@ -18,16 +19,42 @@\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n \n-# Include ICU files, if they exist. See the \"Deployment approach\" section in\n-# ``PyInstaller/utils/hooks/qt.py``.\n-[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),\n- os.path.join('PyQt5', 'Qt', 'bin', dll))\n- for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]\n-\n-# TODO: Include software rendering for OpenGL. See the \"Deployment approach\". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.\n-##binaries = []\n-##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):\n-## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)\n-## # Only add files if they exist.\n-## if glob(dll_path):\n-## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]\n+\n+def find_all_or_none(globs_to_include, num_files):\n+ \"\"\"\n+ globs_to_include is a list of file name globs\n+ If the number of found files does not match num_files\n+ then no files will be included.\n+ \"\"\"\n+ # TODO: This function is required because CI is failing to include libEGL\n+ # The error in AppVeyor is:\n+ # [2312] LOADER: Running pyi_lib_PyQt5-uic.py\n+ # Failed to load libEGL (Access is denied.)\n+ # More info: https://github.com/pyinstaller/pyinstaller/pull/3568\n+ # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and\n+ # libGLESv2.dll will not be included for PyQt5 builds during CI.\n+ to_include = []\n+ for dll in globs_to_include:\n+ dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ dll)\n+ dll_file_paths = glob.glob(dll_path)\n+ for dll_file_path in dll_file_paths:\n+ file_name = os.path.basename(dll_file_path)\n+ dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name)\n+ to_include.append((dll_file_path, dst_dll_path))\n+ if len(to_include) == num_files:\n+ return to_include\n+ return []\n+\n+\n+binaries = []\n+angle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll']\n+binaries += find_all_or_none(angle_files, 3)\n+\n+opengl_software_renderer = ['opengl32sw.dll']\n+binaries += find_all_or_none(opengl_software_renderer, 1)\n+\n+# Include ICU files, if they exist.\n+# See the \"Deployment approach\" section in ``PyInstaller/utils/hooks/qt.py``.\n+icu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']\n+binaries += find_all_or_none(icu_files, 3)\n", "issue": "Python3.4 PyQt5 QML application requires environment variables\nWith the PyQt5 fixes merged https://github.com/pyinstaller/pyinstaller/pull/3439 I decided to try creating an executable that I have been having trouble with. 
https://github.com/pyinstaller/pyinstaller/pull/3439#issuecomment-379064155\r\n\r\nThis is the source code https://github.com/Siecje/qml-testing/tree/PyInstaller\r\n\r\nI'm using\r\n- Windows 7 32-bit\r\n- Qt 5.10.1\r\n- PyQt5 compiled from source\r\n- Python 3.4.4\r\n- pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\nWhen I run the .exe I get an error\r\n\r\n```\r\nQWindowsEGLStaticContext::create: Failed to load and resolve libEGL function\r\nFailed to load opengl32sw.dll (The specified module could not be found.)\r\nFailed to load and resolve WGL/OpenGL functions\r\nFailed to create OpenGL context for format QsurfaceFormat(version 2.0, options QFlags<QSurfaceFormat::FormatOption>(), depthBufferSize 24, ...<snip>\r\nThis is most likely caused by not having the necessary graphics drivers installed.\r\n\r\nInstall a driver providing OpenGL 2.0 or higher, or, if this is not possible, make sure the ANGLE Open GL ES 2.0 emulation libraries (libEGL.dll, libLESv2.dll and d3dcompiler_*.dll) are available in the application executabl's directory or in a location listed in PATH.\r\n```\r\n\r\nTo run the application I can copy these four .dlls into the `dist\\main\\` directory.\r\n\r\n- libEGL.dll\r\n- libGLESv2.dll\r\n- d3dcompiler_47.dll\r\n- opengl32sw.dll\r\n\r\nWhen I run it I get Command Prompt window with this output.\r\n```\r\nQWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001\r\nQWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.\r\n```\r\nInstead of copying those .dll files I can add the Qt bin directory to my PATH.\r\n\r\n```\r\nset PATH=%PATH%;C:\\Qt\\5.10.1\\msvc2015\\bin\r\ncall main.exe\r\nQWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001\r\nQWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.\r\n```\r\n\r\nWhen I copy the `dist\\main\\` to another computer (Windows 10).\r\n\r\nI have to set two environment variables before the application will work.\r\n\r\n```\r\nset QT_QPA_PLATFORM_PLUGIN_PATH=%exeDir%\\PyQt5\\Qt\\plugins\\platforms\r\nset QML2_IMPORT_PATH=%exeDir%\\PyQt5\\Qt\\qml\r\n```\r\n\r\nThere are no error messages on the Windows 10 computer with these two environment variables set.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport os\n\nfrom PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n\nhiddenimports = ['sip']\n\n# Collect the ``qt.conf`` file.\ndatas = [x for x in\n collect_system_data_files(pyqt5_library_info.location['PrefixPath'],\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n\n# Include ICU files, if they exist. See the \"Deployment approach\" section in\n# ``PyInstaller/utils/hooks/qt.py``.\n[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),\n os.path.join('PyQt5', 'Qt', 'bin', dll))\n for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]\n\n# TODO: Include software rendering for OpenGL. See the \"Deployment approach\". 
However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.\n##binaries = []\n##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):\n## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)\n## # Only add files if they exist.\n## if glob(dll_path):\n## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]\n", "path": "PyInstaller/hooks/hook-PyQt5.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport glob\nimport os\n\nfrom PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n\nhiddenimports = ['sip']\n\n# Collect the ``qt.conf`` file.\ndatas = [x for x in\n collect_system_data_files(pyqt5_library_info.location['PrefixPath'],\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n\n\ndef find_all_or_none(globs_to_include, num_files):\n \"\"\"\n globs_to_include is a list of file name globs\n If the number of found files does not match num_files\n then no files will be included.\n \"\"\"\n # TODO: This function is required because CI is failing to include libEGL\n # The error in AppVeyor is:\n # [2312] LOADER: Running pyi_lib_PyQt5-uic.py\n # Failed to load libEGL (Access is denied.)\n # More info: https://github.com/pyinstaller/pyinstaller/pull/3568\n # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and\n # libGLESv2.dll will not be included for PyQt5 builds during CI.\n to_include = []\n for dll in globs_to_include:\n dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'],\n dll)\n dll_file_paths = glob.glob(dll_path)\n for dll_file_path in dll_file_paths:\n file_name = os.path.basename(dll_file_path)\n dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name)\n to_include.append((dll_file_path, dst_dll_path))\n if len(to_include) == num_files:\n return to_include\n return []\n\n\nbinaries = []\nangle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll']\nbinaries += find_all_or_none(angle_files, 3)\n\nopengl_software_renderer = ['opengl32sw.dll']\nbinaries += find_all_or_none(opengl_software_renderer, 1)\n\n# Include ICU files, if they exist.\n# See the \"Deployment approach\" section in ``PyInstaller/utils/hooks/qt.py``.\nicu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']\nbinaries += find_all_or_none(icu_files, 3)\n", "path": "PyInstaller/hooks/hook-PyQt5.py"}]} | 1,408 | 918 |
gh_patches_debug_24813 | rasdani/github-patches | git_diff | scrapy__scrapy-2577 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BrowserLikeContextFactory not available in some conditions
While tracing the error that led to #2555 I tried the workaround [mentioned in the documentation](https://doc.scrapy.org/en/latest/topics/settings.html#downloader-clientcontextfactory) without success.
This code lives in [contextfactory.py](https://github.com/scrapy/scrapy/blob/c3411373e8a8ee2786588bdad7be469c69a25e2a/scrapy/core/downloader/contextfactory.py#L63) but was not reachable as the import was failing on my system due to #2555.
This file is a large `try/except` block with many potential points of failure and it's likely to trip other users in the future.
That said, could this be refactored to provide a fallback for `BrowserLikeContextFactory` or otherwise reduce the scope of the `try/except` to avoid breaking the API?
--- END ISSUE ---
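For illustration, a minimal sketch of one way to reduce the scope of the `try/except`, as the report asks: gate on the Twisted version instead of wrapping every import, so a single failing import cannot silently remove `BrowserLikeContextFactory` from the module.

```python
from scrapy import twisted_version

if twisted_version >= (14, 0, 0):
    # The TLS-verifying factories need the modern Twisted SSL/Agent API.
    from twisted.internet.ssl import optionsForClientTLS, platformTrust
    from twisted.web.client import BrowserLikePolicyForHTTPS
else:
    # Older Twisted: only the permissive legacy factory can be offered.
    BrowserLikePolicyForHTTPS = None
```

Projects would still opt in through the `DOWNLOADER_CLIENTCONTEXTFACTORY` setting, exactly as the linked documentation describes.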
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/core/downloader/contextfactory.py`
Content:
```
1 from OpenSSL import SSL
2 from twisted.internet.ssl import ClientContextFactory
3
4 try:
5
6 from zope.interface.declarations import implementer
7
8 # the following should be available from Twisted 14.0.0
9 from twisted.internet.ssl import (optionsForClientTLS,
10 CertificateOptions,
11 platformTrust)
12
13 from twisted.web.client import BrowserLikePolicyForHTTPS
14 from twisted.web.iweb import IPolicyForHTTPS
15
16 from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS
17
18
19 @implementer(IPolicyForHTTPS)
20 class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
21 """
22 Non-peer-certificate verifying HTTPS context factory
23
24 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
25 which allows TLS protocol negotiation
26
27 'A TLS/SSL connection established with [this method] may
28 understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'
29 """
30
31 def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):
32 super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)
33 self._ssl_method = method
34
35 def getCertificateOptions(self):
36 # setting verify=True will require you to provide CAs
37 # to verify against; in other words: it's not that simple
38
39 # backward-compatible SSL/TLS method:
40 #
41 # * this will respect `method` attribute in often recommended
42 # `ScrapyClientContextFactory` subclass
43 # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)
44 #
45 # * getattr() for `_ssl_method` attribute for context factories
46 # not calling super(..., self).__init__
47 return CertificateOptions(verify=False,
48 method=getattr(self, 'method',
49 getattr(self, '_ssl_method', None)),
50 fixBrokenPeers=True,
51 acceptableCiphers=DEFAULT_CIPHERS)
52
53 # kept for old-style HTTP/1.0 downloader context twisted calls,
54 # e.g. connectSSL()
55 def getContext(self, hostname=None, port=None):
56 return self.getCertificateOptions().getContext()
57
58 def creatorForNetloc(self, hostname, port):
59 return ScrapyClientTLSOptions(hostname.decode("ascii"), self.getContext())
60
61
62 @implementer(IPolicyForHTTPS)
63 class BrowserLikeContextFactory(ScrapyClientContextFactory):
64 """
65 Twisted-recommended context factory for web clients.
66
67 Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:
68 "The default is to use a BrowserLikePolicyForHTTPS,
69 so unless you have special requirements you can leave this as-is."
70
71 creatorForNetloc() is the same as BrowserLikePolicyForHTTPS
72 except this context factory allows setting the TLS/SSL method to use.
73
74 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
75 which allows TLS protocol negotiation.
76 """
77 def creatorForNetloc(self, hostname, port):
78
79 # trustRoot set to platformTrust() will use the platform's root CAs.
80 #
81 # This means that a website like https://www.cacert.org will be rejected
82 # by default, since CAcert.org CA certificate is seldom shipped.
83 return optionsForClientTLS(hostname.decode("ascii"),
84 trustRoot=platformTrust(),
85 extraCertificateOptions={
86 'method': self._ssl_method,
87 })
88
89 except ImportError:
90
91 class ScrapyClientContextFactory(ClientContextFactory):
92 "A SSL context factory which is more permissive against SSL bugs."
93 # see https://github.com/scrapy/scrapy/issues/82
94 # and https://github.com/scrapy/scrapy/issues/26
95 # and https://github.com/scrapy/scrapy/issues/981
96
97 def __init__(self, method=SSL.SSLv23_METHOD):
98 self.method = method
99
100 def getContext(self, hostname=None, port=None):
101 ctx = ClientContextFactory.getContext(self)
102 # Enable all workarounds to SSL bugs as documented by
103 # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
104 ctx.set_options(SSL.OP_ALL)
105 return ctx
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py
--- a/scrapy/core/downloader/contextfactory.py
+++ b/scrapy/core/downloader/contextfactory.py
@@ -1,15 +1,15 @@
from OpenSSL import SSL
from twisted.internet.ssl import ClientContextFactory
-try:
+from scrapy import twisted_version
+
+if twisted_version >= (14, 0, 0):
from zope.interface.declarations import implementer
- # the following should be available from Twisted 14.0.0
from twisted.internet.ssl import (optionsForClientTLS,
CertificateOptions,
platformTrust)
-
from twisted.web.client import BrowserLikePolicyForHTTPS
from twisted.web.iweb import IPolicyForHTTPS
@@ -86,7 +86,7 @@
'method': self._ssl_method,
})
-except ImportError:
+else:
class ScrapyClientContextFactory(ClientContextFactory):
"A SSL context factory which is more permissive against SSL bugs."
| {"golden_diff": "diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py\n--- a/scrapy/core/downloader/contextfactory.py\n+++ b/scrapy/core/downloader/contextfactory.py\n@@ -1,15 +1,15 @@\n from OpenSSL import SSL\n from twisted.internet.ssl import ClientContextFactory\n \n-try:\n+from scrapy import twisted_version\n+\n+if twisted_version >= (14, 0, 0):\n \n from zope.interface.declarations import implementer\n \n- # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n-\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n \n@@ -86,7 +86,7 @@\n 'method': self._ssl_method,\n })\n \n-except ImportError:\n+else:\n \n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n", "issue": "BrowserLikeContextFactory not available in some conditions\nWhile tracing the error that lead to #2555 I tried the workaround [mentioned in the documentation](https://doc.scrapy.org/en/latest/topics/settings.html#downloader-clientcontextfactory) without success.\r\n\r\nThis code lives [incontextfactory.py](https://github.com/scrapy/scrapy/blob/c3411373e8a8ee2786588bdad7be469c69a25e2a/scrapy/core/downloader/contextfactory.py#L63) but was not reachable as the import was failing on my system due to #2555.\r\n\r\nThis file is a large `try/except` block with many potential points of failure and it's likely to trip other users in the future.\r\nThat said, could this be refactored to provide a fallback for `BrowserLikeContextFactory` or otherwise reduce the scope of the `try/except` to avoid breaking the API?\n", "before_files": [{"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\ntry:\n\n from zope.interface.declarations import implementer\n\n # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS\n\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)),\n fixBrokenPeers=True,\n acceptableCiphers=DEFAULT_CIPHERS)\n\n # kept for old-style 
HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nexcept ImportError:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}], "after_files": [{"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\nfrom scrapy import twisted_version\n\nif twisted_version >= (14, 0, 0):\n\n from zope.interface.declarations import implementer\n\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS\n\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` 
attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)),\n fixBrokenPeers=True,\n acceptableCiphers=DEFAULT_CIPHERS)\n\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nelse:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}]} | 1,612 | 230 |
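The scrapy record above replaces a module-wide `try/except ImportError` with an explicit check against `scrapy.twisted_version`, which is the substance of the reported problem: any unrelated `ImportError` raised inside the old `try` block silently removed `BrowserLikeContextFactory` from the module. A generic sketch of the control-flow change (stand-in names and values, not scrapy's real modules):

```python
# Stand-in illustration of the pattern the patch adopts: gate the modern code
# path on an explicit version tuple instead of a broad try/except ImportError.
installed_version = (14, 0, 0)   # stand-in for scrapy's `twisted_version`

if installed_version >= (14, 0, 0):
    # Anything that fails to import in this branch now raises loudly instead
    # of being mistaken for "Twisted is too old" and hiding the modern factory.
    def make_context_factory():
        return "browser-like TLS policy"
else:
    def make_context_factory():
        return "permissive legacy context factory"

print(make_context_factory())
```

The narrower condition keeps the legacy fallback reachable only when Twisted really is older than 14.0.0.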
gh_patches_debug_63531 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-2224 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New release
Hi,
When is the new release coming? I can't update to MongoDB 4.2 because of this: https://github.com/MongoEngine/mongoengine/pull/2160/commits/47f8a126ca167cb8fe020e3cc5604b155dfcdebc.
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mongoengine/__init__.py`
Content:
```
1 # Import submodules so that we can expose their __all__
2 from mongoengine import connection
3 from mongoengine import document
4 from mongoengine import errors
5 from mongoengine import fields
6 from mongoengine import queryset
7 from mongoengine import signals
8
9 # Import everything from each submodule so that it can be accessed via
10 # mongoengine, e.g. instead of `from mongoengine.connection import connect`,
11 # users can simply use `from mongoengine import connect`, or even
12 # `from mongoengine import *` and then `connect('testdb')`.
13 from mongoengine.connection import *
14 from mongoengine.document import *
15 from mongoengine.errors import *
16 from mongoengine.fields import *
17 from mongoengine.queryset import *
18 from mongoengine.signals import *
19
20
21 __all__ = (
22 list(document.__all__)
23 + list(fields.__all__)
24 + list(connection.__all__)
25 + list(queryset.__all__)
26 + list(signals.__all__)
27 + list(errors.__all__)
28 )
29
30
31 VERSION = (0, 18, 2)
32
33
34 def get_version():
35 """Return the VERSION as a string.
36
37 For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.
38 """
39 return ".".join(map(str, VERSION))
40
41
42 __version__ = get_version()
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py
--- a/mongoengine/__init__.py
+++ b/mongoengine/__init__.py
@@ -28,7 +28,7 @@
)
-VERSION = (0, 18, 2)
+VERSION = (0, 19, 0)
def get_version():
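The change is a pure version bump, so the only observable difference is what the version helpers report; a quick plain-Python check mirroring the `get_version` shown in the file above:

```python
VERSION = (0, 19, 0)

def get_version():
    """Return VERSION as a string, e.g. (0, 19, 0) -> '0.19.0'."""
    return ".".join(map(str, VERSION))

print(get_version())  # 0.19.0
```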
| {"golden_diff": "diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py\n--- a/mongoengine/__init__.py\n+++ b/mongoengine/__init__.py\n@@ -28,7 +28,7 @@\n )\n \n \n-VERSION = (0, 18, 2)\n+VERSION = (0, 19, 0)\n \n \n def get_version():\n", "issue": "New release\nHi,\r\n\r\nWhen is coming new release, because I can't update to mongodb 4.2 because of this: https://github.com/MongoEngine/mongoengine/pull/2160/commits/47f8a126ca167cb8fe020e3cc5604b155dfcdebc.\r\n\r\nThanks\n", "before_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 18, 2)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}], "after_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 19, 0)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}]} | 700 | 85 |
gh_patches_debug_4281 | rasdani/github-patches | git_diff | ocf__ocfweb-808 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
disk quota on Account commands page errors
When trying to check disk quota usage on the commands page (https://www.ocf.berkeley.edu/account/commands/), this error appears:
`quota: Bad output format units for human readable output: vQ`

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/account/commands.py`
Content:
```
1 from django import forms
2 from django.forms import widgets
3 from django.http import HttpRequest
4 from django.http import HttpResponse
5 from django.shortcuts import render
6 from paramiko import AuthenticationException
7 from paramiko import SSHClient
8 from paramiko.hostkeys import HostKeyEntry
9
10 from ocfweb.component.forms import Form
11
12
13 def commands(request: HttpRequest) -> HttpResponse:
14 command_to_run = ''
15 output = ''
16 error = ''
17 if request.method == 'POST':
18 form = CommandForm(request.POST)
19 if form.is_valid():
20 username = form.cleaned_data['username']
21 password = form.cleaned_data['password']
22
23 command_to_run = form.cleaned_data['command_to_run']
24
25 ssh = SSHClient()
26
27 host_keys = ssh.get_host_keys()
28 entry = HostKeyEntry.from_line(
29 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa
30 )
31 assert entry is not None # should never be none as we are passing a static string above
32 host_keys.add(
33 'ssh.ocf.berkeley.edu',
34 'ssh-rsa',
35 entry.key,
36 )
37
38 try:
39 ssh.connect(
40 'ssh.ocf.berkeley.edu',
41 username=username,
42 password=password,
43 )
44 except AuthenticationException:
45 error = 'Authentication failed. Did you type the wrong username or password?'
46
47 if not error:
48 _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)
49 output = ssh_stdout.read().decode()
50 error = ssh_stderr.read().decode()
51 else:
52 form = CommandForm()
53
54 return render(
55 request,
56 'account/commands/index.html', {
57 'title': 'Account commands',
58 'form': form,
59 'command': command_to_run,
60 'output': output,
61 'error': error,
62 },
63 )
64
65
66 class CommandForm(Form):
67 username = forms.CharField(
68 label='OCF username',
69 min_length=3,
70 max_length=16,
71 )
72 password = forms.CharField(
73 widget=forms.PasswordInput,
74 label='Password',
75 min_length=8,
76 max_length=256,
77 )
78
79 COMMAND_CHOICES = (
80 (
81 '/opt/share/utils/bin/paper',
82 'paper quota -- how many pages you have remaining this semester',
83 ),
84 (
85 '/usr/bin/quota -svQ',
86 'disk quota -- how much disk space you have used and how much you ' +
87 'have left',
88 ),
89 (
90 '/opt/share/utils/bin/makehttp',
91 'makehttp -- set up the web space for your OCF account',
92 ),
93 (
94 'echo yes | /opt/share/utils/bin/makemysql',
95 'makemysql -- reset your MySQL database password, or create a new ' +
96 'MySQL database (copy down the password somewhere secure)',
97 ),
98 )
99
100 command_to_run = forms.ChoiceField(
101 choices=COMMAND_CHOICES,
102 label='Command to run',
103 widget=widgets.RadioSelect,
104 )
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ocfweb/account/commands.py b/ocfweb/account/commands.py
--- a/ocfweb/account/commands.py
+++ b/ocfweb/account/commands.py
@@ -82,7 +82,7 @@
'paper quota -- how many pages you have remaining this semester',
),
(
- '/usr/bin/quota -svQ',
+ '/usr/bin/quota -vQs',
'disk quota -- how much disk space you have used and how much you ' +
'have left',
),
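The error text in the issue ("Bad output format units for human readable output: vQ") shows `quota` treating the letters bundled after `-s` as the optional units argument of `--human-readable[=units]`, which is why reordering the flags to `-vQs` fixes it. A small Python `getopt` sketch of the same bundling behaviour (an analogy only: the option string here is made up, and Python's getopt makes the argument mandatory rather than optional):

```python
import getopt

# "s:" stands in for an option that takes an argument, like quota's
# --human-readable[=units]; "v" and "Q" are plain flags.
opts, args = getopt.getopt(["-svQ"], "s:vQ")
print(opts)  # [('-s', 'vQ')] -> the trailing "vQ" is consumed as -s's argument

# quota's -s argument is optional, so a trailing -s with nothing after it (as
# in the patched "-vQs") simply selects the default units; Python's getopt,
# with its mandatory argument, would instead demand one.
```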
| {"golden_diff": "diff --git a/ocfweb/account/commands.py b/ocfweb/account/commands.py\n--- a/ocfweb/account/commands.py\n+++ b/ocfweb/account/commands.py\n@@ -82,7 +82,7 @@\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n- '/usr/bin/quota -svQ',\n+ '/usr/bin/quota -vQs',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n", "issue": "disk quota on Account commands page errors\nWhen trying to check disk quota usage on the commands page (https://www.ocf.berkeley.edu/account/commands/)\r\nthis error appears\r\n\r\n`quota: Bad output format units for human readable output: vQ`\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.forms import widgets\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom paramiko import AuthenticationException\nfrom paramiko import SSHClient\nfrom paramiko.hostkeys import HostKeyEntry\n\nfrom ocfweb.component.forms import Form\n\n\ndef commands(request: HttpRequest) -> HttpResponse:\n command_to_run = ''\n output = ''\n error = ''\n if request.method == 'POST':\n form = CommandForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n command_to_run = form.cleaned_data['command_to_run']\n\n ssh = SSHClient()\n\n host_keys = ssh.get_host_keys()\n entry = HostKeyEntry.from_line(\n 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa\n )\n assert entry is not None # should never be none as we are passing a static string above\n host_keys.add(\n 'ssh.ocf.berkeley.edu',\n 'ssh-rsa',\n entry.key,\n )\n\n try:\n ssh.connect(\n 'ssh.ocf.berkeley.edu',\n username=username,\n password=password,\n )\n except AuthenticationException:\n error = 'Authentication failed. 
Did you type the wrong username or password?'\n\n if not error:\n _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)\n output = ssh_stdout.read().decode()\n error = ssh_stderr.read().decode()\n else:\n form = CommandForm()\n\n return render(\n request,\n 'account/commands/index.html', {\n 'title': 'Account commands',\n 'form': form,\n 'command': command_to_run,\n 'output': output,\n 'error': error,\n },\n )\n\n\nclass CommandForm(Form):\n username = forms.CharField(\n label='OCF username',\n min_length=3,\n max_length=16,\n )\n password = forms.CharField(\n widget=forms.PasswordInput,\n label='Password',\n min_length=8,\n max_length=256,\n )\n\n COMMAND_CHOICES = (\n (\n '/opt/share/utils/bin/paper',\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n '/usr/bin/quota -svQ',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n (\n '/opt/share/utils/bin/makehttp',\n 'makehttp -- set up the web space for your OCF account',\n ),\n (\n 'echo yes | /opt/share/utils/bin/makemysql',\n 'makemysql -- reset your MySQL database password, or create a new ' +\n 'MySQL database (copy down the password somewhere secure)',\n ),\n )\n\n command_to_run = forms.ChoiceField(\n choices=COMMAND_CHOICES,\n label='Command to run',\n widget=widgets.RadioSelect,\n )\n", "path": "ocfweb/account/commands.py"}], "after_files": [{"content": "from django import forms\nfrom django.forms import widgets\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom paramiko import AuthenticationException\nfrom paramiko import SSHClient\nfrom paramiko.hostkeys import HostKeyEntry\n\nfrom ocfweb.component.forms import Form\n\n\ndef commands(request: HttpRequest) -> HttpResponse:\n command_to_run = ''\n output = ''\n error = ''\n if request.method == 'POST':\n form = CommandForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n command_to_run = form.cleaned_data['command_to_run']\n\n ssh = SSHClient()\n\n host_keys = ssh.get_host_keys()\n entry = HostKeyEntry.from_line(\n 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa\n )\n assert entry is not None # should never be none as we are passing a static string above\n host_keys.add(\n 'ssh.ocf.berkeley.edu',\n 'ssh-rsa',\n entry.key,\n )\n\n try:\n ssh.connect(\n 'ssh.ocf.berkeley.edu',\n username=username,\n password=password,\n )\n except AuthenticationException:\n error = 'Authentication failed. 
Did you type the wrong username or password?'\n\n if not error:\n _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)\n output = ssh_stdout.read().decode()\n error = ssh_stderr.read().decode()\n else:\n form = CommandForm()\n\n return render(\n request,\n 'account/commands/index.html', {\n 'title': 'Account commands',\n 'form': form,\n 'command': command_to_run,\n 'output': output,\n 'error': error,\n },\n )\n\n\nclass CommandForm(Form):\n username = forms.CharField(\n label='OCF username',\n min_length=3,\n max_length=16,\n )\n password = forms.CharField(\n widget=forms.PasswordInput,\n label='Password',\n min_length=8,\n max_length=256,\n )\n\n COMMAND_CHOICES = (\n (\n '/opt/share/utils/bin/paper',\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n '/usr/bin/quota -vQs',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n (\n '/opt/share/utils/bin/makehttp',\n 'makehttp -- set up the web space for your OCF account',\n ),\n (\n 'echo yes | /opt/share/utils/bin/makemysql',\n 'makemysql -- reset your MySQL database password, or create a new ' +\n 'MySQL database (copy down the password somewhere secure)',\n ),\n )\n\n command_to_run = forms.ChoiceField(\n choices=COMMAND_CHOICES,\n label='Command to run',\n widget=widgets.RadioSelect,\n )\n", "path": "ocfweb/account/commands.py"}]} | 1,501 | 122 |
gh_patches_debug_23038 | rasdani/github-patches | git_diff | aws__aws-cli-2702 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cloudformation deploy does not honour tabs in JSON templates
Most of our templates use tabs for indentation, and everything was fine until we tried to feed a template to `aws cloudformation deploy`, which did not like it even though every other AWS CLI function works with the template just fine:
```
[galaxy@athena templates]$ aws cloudformation validate-template --template-body file://codepipeline.template
{
"CapabilitiesReason": "The following resource(s) require capabilities: [AWS::IAM::Role]",
"Description": "Playing with CodeCommit, CodeBuild, and CodeDeploy",
"Parameters": [
```
and the only function that fails to parse the template is:
```
[galaxy@athena templates]$ aws cloudformation deploy --stack-name "galaxy-ccc" --template-file codepipeline.template --capabilities CAPABILITY_IAM
while scanning for the next token
found character '\t' that cannot start any token
in "<string>", line 2, column 1:
"AWSTemplateFormatVersion": "20 ...
^
```
A quick fix is to replace tabs with spaces:
```
[galaxy@athena templates]$ sed 's,\t, ,g' codepipeline.template > c.template
[galaxy@athena templates]$ aws cloudformation deploy --stack-name "galaxy-ccc" --template-file c.template --capabilities CAPABILITY_IAM
Waiting for changeset to be created..
Waiting for stack create/update to complete
```
... but it would mean that we would need to fix all our templates, which are valid JSON, just to work around a bug in the tool! :(
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/cloudformation/yamlhelper.py`
Content:
```
1 # Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import yaml
15 from awscli.compat import six
16 from yaml.resolver import ScalarNode, SequenceNode
17
18
19 def intrinsics_multi_constructor(loader, tag_prefix, node):
20 """
21 YAML constructor to parse CloudFormation intrinsics.
22 This will return a dictionary with key being the instrinsic name
23 """
24
25 # Get the actual tag name excluding the first exclamation
26 tag = node.tag[1:]
27
28 # All CloudFormation intrinsics have prefix Fn:: except Ref
29 prefix = "Fn::"
30 if tag == "Ref":
31 prefix = ""
32
33 cfntag = prefix + tag
34
35 if tag == "GetAtt" and isinstance(node.value, six.string_types):
36 # ShortHand notation for !GetAtt accepts Resource.Attribute format
37 # while the standard notation is to use an array
38 # [Resource, Attribute]. Convert shorthand to standard format
39 value = node.value.split(".", 1)
40
41 elif isinstance(node, ScalarNode):
42 # Value of this node is scalar
43 value = loader.construct_scalar(node)
44
45 elif isinstance(node, SequenceNode):
46 # Value of this node is an array (Ex: [1,2])
47 value = loader.construct_sequence(node)
48
49 else:
50 # Value of this node is an mapping (ex: {foo: bar})
51 value = loader.construct_mapping(node)
52
53 return {cfntag: value}
54
55
56 def yaml_dump(dict_to_dump):
57 """
58 Dumps the dictionary as a YAML document
59 :param dict_to_dump:
60 :return:
61 """
62 return yaml.safe_dump(dict_to_dump, default_flow_style=False)
63
64
65 def yaml_parse(yamlstr):
66
67 yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)
68
69 return yaml.safe_load(yamlstr)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py
--- a/awscli/customizations/cloudformation/yamlhelper.py
+++ b/awscli/customizations/cloudformation/yamlhelper.py
@@ -10,11 +10,12 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-
+import json
import yaml
-from awscli.compat import six
from yaml.resolver import ScalarNode, SequenceNode
+from awscli.compat import six
+
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
@@ -63,7 +64,13 @@
def yaml_parse(yamlstr):
-
- yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)
-
- return yaml.safe_load(yamlstr)
+ """Parse a yaml string"""
+ try:
+ # PyYAML doesn't support json as well as it should, so if the input
+ # is actually just json it is better to parse it with the standard
+ # json parser.
+ return json.loads(yamlstr)
+ except ValueError:
+ yaml.SafeLoader.add_multi_constructor(
+ "!", intrinsics_multi_constructor)
+ return yaml.safe_load(yamlstr)
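A minimal repro of the behaviour the patch works around, assuming PyYAML is installed: tab-indented JSON is valid JSON, but PyYAML's scanner rejects the tab, which is exactly the `found character '\t' that cannot start any token` error quoted in the issue.

```python
import json
import yaml

template = '{\n\t"AWSTemplateFormatVersion": "2010-09-09"\n}'

print(json.loads(template))  # {'AWSTemplateFormatVersion': '2010-09-09'}

try:
    yaml.safe_load(template)
except yaml.YAMLError as err:
    print(type(err).__name__)  # ScannerError: the tab cannot start a YAML token
```

Trying `json.loads` first therefore handles every valid JSON template (tabs included) and only falls back to the YAML parser for genuine YAML input.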
| {"golden_diff": "diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py\n--- a/awscli/customizations/cloudformation/yamlhelper.py\n+++ b/awscli/customizations/cloudformation/yamlhelper.py\n@@ -10,11 +10,12 @@\n # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n # ANY KIND, either express or implied. See the License for the specific\n # language governing permissions and limitations under the License.\n-\n+import json\n import yaml\n-from awscli.compat import six\n from yaml.resolver import ScalarNode, SequenceNode\n \n+from awscli.compat import six\n+\n \n def intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n@@ -63,7 +64,13 @@\n \n \n def yaml_parse(yamlstr):\n-\n- yaml.SafeLoader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n-\n- return yaml.safe_load(yamlstr)\n+ \"\"\"Parse a yaml string\"\"\"\n+ try:\n+ # PyYAML doesn't support json as well as it should, so if the input\n+ # is actually just json it is better to parse it with the standard\n+ # json parser.\n+ return json.loads(yamlstr)\n+ except ValueError:\n+ yaml.SafeLoader.add_multi_constructor(\n+ \"!\", intrinsics_multi_constructor)\n+ return yaml.safe_load(yamlstr)\n", "issue": "cloudformation deploy does not honour tabs in JSON templates\nMost of our templates are using tabs for the indentation and everything was fine until we tried to feed the template to `aws cloudformation deploy` which did not like the template despite that every other function in AWS CLI works with the template just fine:\r\n\r\n```\r\n[galaxy@athena templates]$ aws cloudformation validate-template --template-body file://codepipeline.template\r\n{\r\n \"CapabilitiesReason\": \"The following resource(s) require capabilities: [AWS::IAM::Role]\", \r\n \"Description\": \"Playing with CodeCommit, CodeBuild, and CodeDeploy\", \r\n \"Parameters\": [\r\n```\r\nand the only function that fails to parse the template is:\r\n```\r\n[galaxy@athena templates]$ aws cloudformation deploy --stack-name \"galaxy-ccc\" --template-file codepipeline.template --capabilities CAPABILITY_IAM\r\n\r\nwhile scanning for the next token\r\nfound character '\\t' that cannot start any token\r\n in \"<string>\", line 2, column 1:\r\n \t\"AWSTemplateFormatVersion\": \"20 ... \r\n ^\r\n```\r\nA quick fix is to replace tabs with spaces:\r\n```\r\n[galaxy@athena templates]$ sed 's,\\t, ,g' codepipeline.template > c.template\r\n[galaxy@athena templates]$ aws cloudformation deploy --stack-name \"galaxy-ccc\" --template-file c.template --capabilities CAPABILITY_IAM\r\nWaiting for changeset to be created..\r\nWaiting for stack create/update to complete\r\n```\r\n\r\n... but it would mean that we would need to fix all our templates which are valid JSON just to workaround a bug in the tool! :(\n", "before_files": [{"content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport yaml\nfrom awscli.compat import six\nfrom yaml.resolver import ScalarNode, SequenceNode\n\n\ndef intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n YAML constructor to parse CloudFormation intrinsics.\n This will return a dictionary with key being the instrinsic name\n \"\"\"\n\n # Get the actual tag name excluding the first exclamation\n tag = node.tag[1:]\n\n # All CloudFormation intrinsics have prefix Fn:: except Ref\n prefix = \"Fn::\"\n if tag == \"Ref\":\n prefix = \"\"\n\n cfntag = prefix + tag\n\n if tag == \"GetAtt\" and isinstance(node.value, six.string_types):\n # ShortHand notation for !GetAtt accepts Resource.Attribute format\n # while the standard notation is to use an array\n # [Resource, Attribute]. Convert shorthand to standard format\n value = node.value.split(\".\", 1)\n\n elif isinstance(node, ScalarNode):\n # Value of this node is scalar\n value = loader.construct_scalar(node)\n\n elif isinstance(node, SequenceNode):\n # Value of this node is an array (Ex: [1,2])\n value = loader.construct_sequence(node)\n\n else:\n # Value of this node is an mapping (ex: {foo: bar})\n value = loader.construct_mapping(node)\n\n return {cfntag: value}\n\n\ndef yaml_dump(dict_to_dump):\n \"\"\"\n Dumps the dictionary as a YAML document\n :param dict_to_dump:\n :return:\n \"\"\"\n return yaml.safe_dump(dict_to_dump, default_flow_style=False)\n\n\ndef yaml_parse(yamlstr):\n\n yaml.SafeLoader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n\n return yaml.safe_load(yamlstr)\n", "path": "awscli/customizations/cloudformation/yamlhelper.py"}], "after_files": [{"content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport json\nimport yaml\nfrom yaml.resolver import ScalarNode, SequenceNode\n\nfrom awscli.compat import six\n\n\ndef intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n YAML constructor to parse CloudFormation intrinsics.\n This will return a dictionary with key being the instrinsic name\n \"\"\"\n\n # Get the actual tag name excluding the first exclamation\n tag = node.tag[1:]\n\n # All CloudFormation intrinsics have prefix Fn:: except Ref\n prefix = \"Fn::\"\n if tag == \"Ref\":\n prefix = \"\"\n\n cfntag = prefix + tag\n\n if tag == \"GetAtt\" and isinstance(node.value, six.string_types):\n # ShortHand notation for !GetAtt accepts Resource.Attribute format\n # while the standard notation is to use an array\n # [Resource, Attribute]. 
Convert shorthand to standard format\n value = node.value.split(\".\", 1)\n\n elif isinstance(node, ScalarNode):\n # Value of this node is scalar\n value = loader.construct_scalar(node)\n\n elif isinstance(node, SequenceNode):\n # Value of this node is an array (Ex: [1,2])\n value = loader.construct_sequence(node)\n\n else:\n # Value of this node is an mapping (ex: {foo: bar})\n value = loader.construct_mapping(node)\n\n return {cfntag: value}\n\n\ndef yaml_dump(dict_to_dump):\n \"\"\"\n Dumps the dictionary as a YAML document\n :param dict_to_dump:\n :return:\n \"\"\"\n return yaml.safe_dump(dict_to_dump, default_flow_style=False)\n\n\ndef yaml_parse(yamlstr):\n \"\"\"Parse a yaml string\"\"\"\n try:\n # PyYAML doesn't support json as well as it should, so if the input\n # is actually just json it is better to parse it with the standard\n # json parser.\n return json.loads(yamlstr)\n except ValueError:\n yaml.SafeLoader.add_multi_constructor(\n \"!\", intrinsics_multi_constructor)\n return yaml.safe_load(yamlstr)\n", "path": "awscli/customizations/cloudformation/yamlhelper.py"}]} | 1,257 | 304 |
gh_patches_debug_9712 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-2733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/search and /filter are currently broken
Any search or filter is currently failing.
https://datasets-server.huggingface.co/search?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&query=a
https://datasets-server.huggingface.co/filter?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&where=id%3E%3D409+and+id%3C511
Logs:
```
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/duckdb-index'
```
when doing `disk_stat = os.statvfs(path)` here:
https://github.com/huggingface/dataset-viewer/blob/a489c0b6ad4e5a78b2670679abbfab93f9be5742/libs/libapi/src/libapi/duckdb.py#L78
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libs/libapi/src/libapi/duckdb.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2023 The HuggingFace Authors.
3
4 import errno
5 import json
6 import logging
7 import os
8 import re
9 from hashlib import sha1
10 from typing import Optional
11
12 import anyio
13 from anyio import Path
14 from libcommon.constants import DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY, SPLIT_DUCKDB_INDEX_KIND
15 from libcommon.parquet_utils import extract_split_name_from_parquet_url
16 from libcommon.prometheus import StepProfiler
17 from libcommon.simple_cache import CacheEntry
18 from libcommon.storage import StrPath, init_dir
19 from libcommon.storage_client import StorageClient
20 from libcommon.utils import download_file_from_hub
21
22 from libapi.exceptions import DownloadIndexError
23 from libapi.utils import get_cache_entry_from_step
24
25 REPO_TYPE = "dataset"
26 HUB_DOWNLOAD_CACHE_FOLDER = "cache"
27
28
29 async def get_index_file_location_and_download_if_missing(
30 duckdb_index_file_directory: StrPath,
31 dataset: str,
32 revision: str,
33 config: str,
34 split: str,
35 filename: str,
36 size_bytes: int,
37 url: str,
38 target_revision: str,
39 hf_token: Optional[str],
40 ) -> str:
41 with StepProfiler(method="get_index_file_location_and_download_if_missing", step="all"):
42 index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision)
43 # For directories like "partial-train" for the file
44 # at "en/partial-train/0000.parquet" in the C4 dataset.
45 # Note that "-" is forbidden for split names, so it doesn't create directory names collisions.
46 split_directory = extract_split_name_from_parquet_url(url)
47 repo_file_location = f"{config}/{split_directory}/{filename}"
48 index_file_location = f"{index_folder}/{repo_file_location}"
49 index_path = Path(index_file_location)
50 if not await index_path.is_file():
51 with StepProfiler(method="get_index_file_location_and_download_if_missing", step="download index file"):
52 cache_folder = f"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}"
53 await anyio.to_thread.run_sync(
54 download_index_file,
55 cache_folder,
56 index_folder,
57 target_revision,
58 dataset,
59 repo_file_location,
60 hf_token,
61 )
62 # Update its modification time
63 await index_path.touch()
64 return index_file_location
65
66
67 def get_download_folder(
68 root_directory: StrPath, size_bytes: int, dataset: str, revision: str, config: str, split: str
69 ) -> str:
70 check_available_disk_space(root_directory, size_bytes)
71 payload = (dataset, config, split, revision)
72 hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]
73 subdirectory = "".join([c if re.match(r"[\w-]", c) else "-" for c in f"{dataset}-{hash_suffix}"])
74 return f"{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}"
75
76
77 def check_available_disk_space(path: StrPath, required_space: int) -> None:
78 disk_stat = os.statvfs(path)
79 # Calculate free space in bytes
80 free_space = disk_stat.f_bavail * disk_stat.f_frsize
81 logging.debug(f"{free_space} available space, needed {required_space}")
82 if free_space < required_space:
83 raise DownloadIndexError(
84 "Cannot perform the search due to a lack of disk space on the server. Please report the issue."
85 )
86
87
88 def download_index_file(
89 cache_folder: str,
90 index_folder: str,
91 target_revision: str,
92 dataset: str,
93 repo_file_location: str,
94 hf_token: Optional[str] = None,
95 ) -> None:
96 logging.info(f"init_dir {index_folder}")
97 try:
98 init_dir(index_folder)
99 download_file_from_hub(
100 repo_type=REPO_TYPE,
101 revision=target_revision,
102 repo_id=dataset,
103 filename=repo_file_location,
104 local_dir=index_folder,
105 hf_token=hf_token,
106 cache_dir=cache_folder,
107 )
108 except OSError as err:
109 if err.errno == errno.ENOSPC:
110 raise DownloadIndexError(
111 "Cannot perform the operation due to a lack of disk space on the server. Please report the issue.", err
112 )
113
114
115 def get_cache_entry_from_duckdb_index_job(
116 dataset: str,
117 config: str,
118 split: str,
119 hf_endpoint: str,
120 hf_token: Optional[str],
121 hf_timeout_seconds: Optional[float],
122 blocked_datasets: list[str],
123 storage_clients: Optional[list[StorageClient]] = None,
124 ) -> CacheEntry:
125 return get_cache_entry_from_step(
126 processing_step_name=SPLIT_DUCKDB_INDEX_KIND,
127 dataset=dataset,
128 config=config,
129 split=split,
130 hf_endpoint=hf_endpoint,
131 hf_token=hf_token,
132 hf_timeout_seconds=hf_timeout_seconds,
133 blocked_datasets=blocked_datasets,
134 storage_clients=storage_clients,
135 )
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libs/libapi/src/libapi/duckdb.py b/libs/libapi/src/libapi/duckdb.py
--- a/libs/libapi/src/libapi/duckdb.py
+++ b/libs/libapi/src/libapi/duckdb.py
@@ -75,7 +75,12 @@
def check_available_disk_space(path: StrPath, required_space: int) -> None:
- disk_stat = os.statvfs(path)
+ try:
+ disk_stat = os.statvfs(path)
+ except FileNotFoundError:
+ # The path does not exist, we create it and
+ init_dir(path)
+ disk_stat = os.statvfs(path)
# Calculate free space in bytes
free_space = disk_stat.f_bavail * disk_stat.f_frsize
logging.debug(f"{free_space} available space, needed {required_space}")
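A small repro of the failure on a Unix host: `os.statvfs` raises `FileNotFoundError` when the directory has never been created, and creating it first (here with `os.makedirs` as a stand-in for libcommon's `init_dir`) makes the free-space check work.

```python
import os
import tempfile

root = tempfile.mkdtemp()
missing = os.path.join(root, "duckdb-index")  # mirrors the path in the log

try:
    os.statvfs(missing)  # Unix-only call
except FileNotFoundError as err:
    print(err)  # [Errno 2] No such file or directory: '.../duckdb-index'

os.makedirs(missing, exist_ok=True)  # stand-in for init_dir(path)
stat = os.statvfs(missing)
print(stat.f_bavail * stat.f_frsize)  # free bytes, now that the path exists
```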
| {"golden_diff": "diff --git a/libs/libapi/src/libapi/duckdb.py b/libs/libapi/src/libapi/duckdb.py\n--- a/libs/libapi/src/libapi/duckdb.py\n+++ b/libs/libapi/src/libapi/duckdb.py\n@@ -75,7 +75,12 @@\n \n \n def check_available_disk_space(path: StrPath, required_space: int) -> None:\n- disk_stat = os.statvfs(path)\n+ try:\n+ disk_stat = os.statvfs(path)\n+ except FileNotFoundError:\n+ # The path does not exist, we create it and\n+ init_dir(path)\n+ disk_stat = os.statvfs(path)\n # Calculate free space in bytes\n free_space = disk_stat.f_bavail * disk_stat.f_frsize\n logging.debug(f\"{free_space} available space, needed {required_space}\")\n", "issue": "/search and /filter are currently broken\nAny search or filter is currently failing.\r\n\r\nhttps://datasets-server.huggingface.co/search?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&query=a\r\nhttps://datasets-server.huggingface.co/filter?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&where=id%3E%3D409+and+id%3C511\r\n\r\nLogs:\r\n\r\n```\r\nFileNotFoundError: [Errno 2] No such file or directory: '/tmp/duckdb-index'\r\n```\r\n\r\nwhen doing `disk_stat = os.statvfs(path)` here: \r\n\r\nhttps://github.com/huggingface/dataset-viewer/blob/a489c0b6ad4e5a78b2670679abbfab93f9be5742/libs/libapi/src/libapi/duckdb.py#L78\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2023 The HuggingFace Authors.\n\nimport errno\nimport json\nimport logging\nimport os\nimport re\nfrom hashlib import sha1\nfrom typing import Optional\n\nimport anyio\nfrom anyio import Path\nfrom libcommon.constants import DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY, SPLIT_DUCKDB_INDEX_KIND\nfrom libcommon.parquet_utils import extract_split_name_from_parquet_url\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.simple_cache import CacheEntry\nfrom libcommon.storage import StrPath, init_dir\nfrom libcommon.storage_client import StorageClient\nfrom libcommon.utils import download_file_from_hub\n\nfrom libapi.exceptions import DownloadIndexError\nfrom libapi.utils import get_cache_entry_from_step\n\nREPO_TYPE = \"dataset\"\nHUB_DOWNLOAD_CACHE_FOLDER = \"cache\"\n\n\nasync def get_index_file_location_and_download_if_missing(\n duckdb_index_file_directory: StrPath,\n dataset: str,\n revision: str,\n config: str,\n split: str,\n filename: str,\n size_bytes: int,\n url: str,\n target_revision: str,\n hf_token: Optional[str],\n) -> str:\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", step=\"all\"):\n index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision)\n # For directories like \"partial-train\" for the file\n # at \"en/partial-train/0000.parquet\" in the C4 dataset.\n # Note that \"-\" is forbidden for split names, so it doesn't create directory names collisions.\n split_directory = extract_split_name_from_parquet_url(url)\n repo_file_location = f\"{config}/{split_directory}/{filename}\"\n index_file_location = f\"{index_folder}/{repo_file_location}\"\n index_path = Path(index_file_location)\n if not await index_path.is_file():\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", step=\"download index file\"):\n cache_folder = f\"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}\"\n await anyio.to_thread.run_sync(\n download_index_file,\n cache_folder,\n index_folder,\n target_revision,\n dataset,\n repo_file_location,\n hf_token,\n )\n # Update its modification time\n await 
index_path.touch()\n return index_file_location\n\n\ndef get_download_folder(\n root_directory: StrPath, size_bytes: int, dataset: str, revision: str, config: str, split: str\n) -> str:\n check_available_disk_space(root_directory, size_bytes)\n payload = (dataset, config, split, revision)\n hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]\n subdirectory = \"\".join([c if re.match(r\"[\\w-]\", c) else \"-\" for c in f\"{dataset}-{hash_suffix}\"])\n return f\"{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}\"\n\n\ndef check_available_disk_space(path: StrPath, required_space: int) -> None:\n disk_stat = os.statvfs(path)\n # Calculate free space in bytes\n free_space = disk_stat.f_bavail * disk_stat.f_frsize\n logging.debug(f\"{free_space} available space, needed {required_space}\")\n if free_space < required_space:\n raise DownloadIndexError(\n \"Cannot perform the search due to a lack of disk space on the server. Please report the issue.\"\n )\n\n\ndef download_index_file(\n cache_folder: str,\n index_folder: str,\n target_revision: str,\n dataset: str,\n repo_file_location: str,\n hf_token: Optional[str] = None,\n) -> None:\n logging.info(f\"init_dir {index_folder}\")\n try:\n init_dir(index_folder)\n download_file_from_hub(\n repo_type=REPO_TYPE,\n revision=target_revision,\n repo_id=dataset,\n filename=repo_file_location,\n local_dir=index_folder,\n hf_token=hf_token,\n cache_dir=cache_folder,\n )\n except OSError as err:\n if err.errno == errno.ENOSPC:\n raise DownloadIndexError(\n \"Cannot perform the operation due to a lack of disk space on the server. Please report the issue.\", err\n )\n\n\ndef get_cache_entry_from_duckdb_index_job(\n dataset: str,\n config: str,\n split: str,\n hf_endpoint: str,\n hf_token: Optional[str],\n hf_timeout_seconds: Optional[float],\n blocked_datasets: list[str],\n storage_clients: Optional[list[StorageClient]] = None,\n) -> CacheEntry:\n return get_cache_entry_from_step(\n processing_step_name=SPLIT_DUCKDB_INDEX_KIND,\n dataset=dataset,\n config=config,\n split=split,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n blocked_datasets=blocked_datasets,\n storage_clients=storage_clients,\n )\n", "path": "libs/libapi/src/libapi/duckdb.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2023 The HuggingFace Authors.\n\nimport errno\nimport json\nimport logging\nimport os\nimport re\nfrom hashlib import sha1\nfrom typing import Optional\n\nimport anyio\nfrom anyio import Path\nfrom libcommon.constants import DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY, SPLIT_DUCKDB_INDEX_KIND\nfrom libcommon.parquet_utils import extract_split_name_from_parquet_url\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.simple_cache import CacheEntry\nfrom libcommon.storage import StrPath, init_dir\nfrom libcommon.storage_client import StorageClient\nfrom libcommon.utils import download_file_from_hub\n\nfrom libapi.exceptions import DownloadIndexError\nfrom libapi.utils import get_cache_entry_from_step\n\nREPO_TYPE = \"dataset\"\nHUB_DOWNLOAD_CACHE_FOLDER = \"cache\"\n\n\nasync def get_index_file_location_and_download_if_missing(\n duckdb_index_file_directory: StrPath,\n dataset: str,\n revision: str,\n config: str,\n split: str,\n filename: str,\n size_bytes: int,\n url: str,\n target_revision: str,\n hf_token: Optional[str],\n) -> str:\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", 
step=\"all\"):\n index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision)\n # For directories like \"partial-train\" for the file\n # at \"en/partial-train/0000.parquet\" in the C4 dataset.\n # Note that \"-\" is forbidden for split names, so it doesn't create directory names collisions.\n split_directory = extract_split_name_from_parquet_url(url)\n repo_file_location = f\"{config}/{split_directory}/{filename}\"\n index_file_location = f\"{index_folder}/{repo_file_location}\"\n index_path = Path(index_file_location)\n if not await index_path.is_file():\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", step=\"download index file\"):\n cache_folder = f\"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}\"\n await anyio.to_thread.run_sync(\n download_index_file,\n cache_folder,\n index_folder,\n target_revision,\n dataset,\n repo_file_location,\n hf_token,\n )\n # Update its modification time\n await index_path.touch()\n return index_file_location\n\n\ndef get_download_folder(\n root_directory: StrPath, size_bytes: int, dataset: str, revision: str, config: str, split: str\n) -> str:\n check_available_disk_space(root_directory, size_bytes)\n payload = (dataset, config, split, revision)\n hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]\n subdirectory = \"\".join([c if re.match(r\"[\\w-]\", c) else \"-\" for c in f\"{dataset}-{hash_suffix}\"])\n return f\"{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}\"\n\n\ndef check_available_disk_space(path: StrPath, required_space: int) -> None:\n try:\n disk_stat = os.statvfs(path)\n except FileNotFoundError:\n # The path does not exist, we create it and\n init_dir(path)\n disk_stat = os.statvfs(path)\n # Calculate free space in bytes\n free_space = disk_stat.f_bavail * disk_stat.f_frsize\n logging.debug(f\"{free_space} available space, needed {required_space}\")\n if free_space < required_space:\n raise DownloadIndexError(\n \"Cannot perform the search due to a lack of disk space on the server. Please report the issue.\"\n )\n\n\ndef download_index_file(\n cache_folder: str,\n index_folder: str,\n target_revision: str,\n dataset: str,\n repo_file_location: str,\n hf_token: Optional[str] = None,\n) -> None:\n logging.info(f\"init_dir {index_folder}\")\n try:\n init_dir(index_folder)\n download_file_from_hub(\n repo_type=REPO_TYPE,\n revision=target_revision,\n repo_id=dataset,\n filename=repo_file_location,\n local_dir=index_folder,\n hf_token=hf_token,\n cache_dir=cache_folder,\n )\n except OSError as err:\n if err.errno == errno.ENOSPC:\n raise DownloadIndexError(\n \"Cannot perform the operation due to a lack of disk space on the server. Please report the issue.\", err\n )\n\n\ndef get_cache_entry_from_duckdb_index_job(\n dataset: str,\n config: str,\n split: str,\n hf_endpoint: str,\n hf_token: Optional[str],\n hf_timeout_seconds: Optional[float],\n blocked_datasets: list[str],\n storage_clients: Optional[list[StorageClient]] = None,\n) -> CacheEntry:\n return get_cache_entry_from_step(\n processing_step_name=SPLIT_DUCKDB_INDEX_KIND,\n dataset=dataset,\n config=config,\n split=split,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n blocked_datasets=blocked_datasets,\n storage_clients=storage_clients,\n )\n", "path": "libs/libapi/src/libapi/duckdb.py"}]} | 1,880 | 183 |
gh_patches_debug_15678 | rasdani/github-patches | git_diff | celery__celery-8338 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
database backend does not store children
The [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/backends/database/models.py`
Content:
```
1 """Database models used by the SQLAlchemy result store backend."""
2 from datetime import datetime
3
4 import sqlalchemy as sa
5 from sqlalchemy.types import PickleType
6
7 from celery import states
8
9 from .session import ResultModelBase
10
11 __all__ = ('Task', 'TaskExtended', 'TaskSet')
12
13
14 class Task(ResultModelBase):
15 """Task result/status."""
16
17 __tablename__ = 'celery_taskmeta'
18 __table_args__ = {'sqlite_autoincrement': True}
19
20 id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),
21 primary_key=True, autoincrement=True)
22 task_id = sa.Column(sa.String(155), unique=True)
23 status = sa.Column(sa.String(50), default=states.PENDING)
24 result = sa.Column(PickleType, nullable=True)
25 date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
26 onupdate=datetime.utcnow, nullable=True)
27 traceback = sa.Column(sa.Text, nullable=True)
28
29 def __init__(self, task_id):
30 self.task_id = task_id
31
32 def to_dict(self):
33 return {
34 'task_id': self.task_id,
35 'status': self.status,
36 'result': self.result,
37 'traceback': self.traceback,
38 'date_done': self.date_done,
39 }
40
41 def __repr__(self):
42 return '<Task {0.task_id} state: {0.status}>'.format(self)
43
44 @classmethod
45 def configure(cls, schema=None, name=None):
46 cls.__table__.schema = schema
47 cls.id.default.schema = schema
48 cls.__table__.name = name or cls.__tablename__
49
50
51 class TaskExtended(Task):
52 """For the extend result."""
53
54 __tablename__ = 'celery_taskmeta'
55 __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}
56
57 name = sa.Column(sa.String(155), nullable=True)
58 args = sa.Column(sa.LargeBinary, nullable=True)
59 kwargs = sa.Column(sa.LargeBinary, nullable=True)
60 worker = sa.Column(sa.String(155), nullable=True)
61 retries = sa.Column(sa.Integer, nullable=True)
62 queue = sa.Column(sa.String(155), nullable=True)
63
64 def to_dict(self):
65 task_dict = super().to_dict()
66 task_dict.update({
67 'name': self.name,
68 'args': self.args,
69 'kwargs': self.kwargs,
70 'worker': self.worker,
71 'retries': self.retries,
72 'queue': self.queue,
73 })
74 return task_dict
75
76
77 class TaskSet(ResultModelBase):
78 """TaskSet result."""
79
80 __tablename__ = 'celery_tasksetmeta'
81 __table_args__ = {'sqlite_autoincrement': True}
82
83 id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),
84 autoincrement=True, primary_key=True)
85 taskset_id = sa.Column(sa.String(155), unique=True)
86 result = sa.Column(PickleType, nullable=True)
87 date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
88 nullable=True)
89
90 def __init__(self, taskset_id, result):
91 self.taskset_id = taskset_id
92 self.result = result
93
94 def to_dict(self):
95 return {
96 'taskset_id': self.taskset_id,
97 'result': self.result,
98 'date_done': self.date_done,
99 }
100
101 def __repr__(self):
102 return f'<TaskSet: {self.taskset_id}>'
103
104 @classmethod
105 def configure(cls, schema=None, name=None):
106 cls.__table__.schema = schema
107 cls.id.default.schema = schema
108 cls.__table__.name = name or cls.__tablename__
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py
--- a/celery/backends/database/models.py
+++ b/celery/backends/database/models.py
@@ -25,6 +25,7 @@
date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow, nullable=True)
traceback = sa.Column(sa.Text, nullable=True)
+ children = sa.Column(PickleType, nullable=True)
def __init__(self, task_id):
self.task_id = task_id
@@ -36,6 +37,7 @@
'result': self.result,
'traceback': self.traceback,
'date_done': self.date_done,
+ 'children': self.children,
}
def __repr__(self):
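Since `AsyncResult.children` is rebuilt from whatever meta the backend row returns, a row with no `children` field can never report them. A standalone SQLAlchemy sketch of persisting the new column (in-memory SQLite and a plain declarative base here, not celery's actual `ResultModelBase` or session handling):

```python
import sqlalchemy as sa
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.types import PickleType

Base = declarative_base()

class Task(Base):
    __tablename__ = "celery_taskmeta"
    id = sa.Column(sa.Integer, primary_key=True)
    task_id = sa.Column(sa.String(155), unique=True)
    children = sa.Column(PickleType, nullable=True)  # the column the patch adds

engine = sa.create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    # The children payload is illustrative; celery stores its own serialized form.
    session.add(Task(task_id="abc", children=[("child-task-id", None)]))
    session.commit()
    row = session.query(Task).filter_by(task_id="abc").one()
    print(row.children)  # [('child-task-id', None)] -- preserved, not dropped
```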
| {"golden_diff": "diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py\n--- a/celery/backends/database/models.py\n+++ b/celery/backends/database/models.py\n@@ -25,6 +25,7 @@\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n+ children = sa.Column(PickleType, nullable=True)\n \n def __init__(self, task_id):\n self.task_id = task_id\n@@ -36,6 +37,7 @@\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n+ 'children': self.children,\n }\n \n def __repr__(self):\n", "issue": "database backend does not store children\nThe [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children.\n", "before_files": [{"content": "\"\"\"Database models used by the SQLAlchemy result store backend.\"\"\"\nfrom datetime import datetime\n\nimport sqlalchemy as sa\nfrom sqlalchemy.types import PickleType\n\nfrom celery import states\n\nfrom .session import ResultModelBase\n\n__all__ = ('Task', 'TaskExtended', 'TaskSet')\n\n\nclass Task(ResultModelBase):\n \"\"\"Task result/status.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),\n primary_key=True, autoincrement=True)\n task_id = sa.Column(sa.String(155), unique=True)\n status = sa.Column(sa.String(50), default=states.PENDING)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n\n def __init__(self, task_id):\n self.task_id = task_id\n\n def to_dict(self):\n return {\n 'task_id': self.task_id,\n 'status': self.status,\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n }\n\n def __repr__(self):\n return '<Task {0.task_id} state: {0.status}>'.format(self)\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n\n\nclass TaskExtended(Task):\n \"\"\"For the extend result.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}\n\n name = sa.Column(sa.String(155), nullable=True)\n args = sa.Column(sa.LargeBinary, nullable=True)\n kwargs = sa.Column(sa.LargeBinary, nullable=True)\n worker = sa.Column(sa.String(155), nullable=True)\n retries = sa.Column(sa.Integer, nullable=True)\n queue = sa.Column(sa.String(155), nullable=True)\n\n def to_dict(self):\n task_dict = super().to_dict()\n task_dict.update({\n 'name': self.name,\n 'args': self.args,\n 'kwargs': self.kwargs,\n 'worker': self.worker,\n 'retries': self.retries,\n 'queue': self.queue,\n })\n return task_dict\n\n\nclass TaskSet(ResultModelBase):\n \"\"\"TaskSet result.\"\"\"\n\n __tablename__ = 'celery_tasksetmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),\n autoincrement=True, primary_key=True)\n taskset_id 
= sa.Column(sa.String(155), unique=True)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n nullable=True)\n\n def __init__(self, taskset_id, result):\n self.taskset_id = taskset_id\n self.result = result\n\n def to_dict(self):\n return {\n 'taskset_id': self.taskset_id,\n 'result': self.result,\n 'date_done': self.date_done,\n }\n\n def __repr__(self):\n return f'<TaskSet: {self.taskset_id}>'\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n", "path": "celery/backends/database/models.py"}], "after_files": [{"content": "\"\"\"Database models used by the SQLAlchemy result store backend.\"\"\"\nfrom datetime import datetime\n\nimport sqlalchemy as sa\nfrom sqlalchemy.types import PickleType\n\nfrom celery import states\n\nfrom .session import ResultModelBase\n\n__all__ = ('Task', 'TaskExtended', 'TaskSet')\n\n\nclass Task(ResultModelBase):\n \"\"\"Task result/status.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),\n primary_key=True, autoincrement=True)\n task_id = sa.Column(sa.String(155), unique=True)\n status = sa.Column(sa.String(50), default=states.PENDING)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n children = sa.Column(PickleType, nullable=True)\n\n def __init__(self, task_id):\n self.task_id = task_id\n\n def to_dict(self):\n return {\n 'task_id': self.task_id,\n 'status': self.status,\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n 'children': self.children,\n }\n\n def __repr__(self):\n return '<Task {0.task_id} state: {0.status}>'.format(self)\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n\n\nclass TaskExtended(Task):\n \"\"\"For the extend result.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}\n\n name = sa.Column(sa.String(155), nullable=True)\n args = sa.Column(sa.LargeBinary, nullable=True)\n kwargs = sa.Column(sa.LargeBinary, nullable=True)\n worker = sa.Column(sa.String(155), nullable=True)\n retries = sa.Column(sa.Integer, nullable=True)\n queue = sa.Column(sa.String(155), nullable=True)\n\n def to_dict(self):\n task_dict = super().to_dict()\n task_dict.update({\n 'name': self.name,\n 'args': self.args,\n 'kwargs': self.kwargs,\n 'worker': self.worker,\n 'retries': self.retries,\n 'queue': self.queue,\n })\n return task_dict\n\n\nclass TaskSet(ResultModelBase):\n \"\"\"TaskSet result.\"\"\"\n\n __tablename__ = 'celery_tasksetmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),\n autoincrement=True, primary_key=True)\n taskset_id = sa.Column(sa.String(155), unique=True)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n nullable=True)\n\n def __init__(self, taskset_id, result):\n self.taskset_id = taskset_id\n self.result = result\n\n def to_dict(self):\n return {\n 'taskset_id': self.taskset_id,\n 'result': self.result,\n 'date_done': self.date_done,\n 
}\n\n def __repr__(self):\n return f'<TaskSet: {self.taskset_id}>'\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n", "path": "celery/backends/database/models.py"}]} | 1,411 | 176 |
gh_patches_debug_2226 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-5568 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support matplotlib-3.4.0rc1
## Description of the issue
`matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b.
This is on schedule for the removal of the deprecations that occurred around the time of #5006. The missing function leads to a traceback output during build, and the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`.
## Context information (for bug reports)
* Output of `pyinstaller --version`: ```4.2```
* Version of Python: `3.8`
* Platform: `Windows`
* Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug`
> * try the latest development version, using the following command:
>
> ```shell
> pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip
> ```
>
> * follow *all* the instructions in our "If Things Go Wrong" Guide
> (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and
>
> ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)
>
> * [ ] start with clean installation
> * [ ] use the latest development version
> * [ ] Run your frozen program **from a command window (shell)** — instead of double-clicking on it
> * [ ] Package your program in **--onedir mode**
> * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file
> * [ ] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.
>
This part of the template is irrelevant, as the responsible code is unchanged on `develop`
### A minimal example program which shows the error
```
pip install --pre matplotlib==3.4.0rc1 pyinstaller
echo "import matplotlib" > test.py
pyinstaller test.py
```
### Stacktrace / full error message
Building `pyinstaller test.py`:
```
20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'...
Traceback (most recent call last):
File "<string>", line 1, in <module>
AttributeError: module 'matplotlib' has no attribute '_get_data_path'
```
Running `test.exe`:
```
Traceback (most recent call last):
File "test.py", line 1, in <module>
import matplotlib
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "PyInstaller\loader\pyimod03_importers.py", line 531, in exec_module
File "matplotlib\__init__.py", line 820, in <module>
File "matplotlib\__init__.py", line 725, in _rc_params_in_file
File "contextlib.py", line 113, in __enter__
File "matplotlib\__init__.py", line 703, in _open_file_or_url
FileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\matplotlib\\mpl-data\\matplotlibrc'
[2688] Failed to execute script test
```
## Possible resolution
Simply remove the first underscore in `matplotlib._get_data_path` in
https://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16
This works on my system.
I'm a little confused as to why the private function was being used in the first place. `matplotlib.get_data_path` has been available for some time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-matplotlib.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2021, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12
13 from PyInstaller.utils.hooks import exec_statement
14
15 mpl_data_dir = exec_statement(
16 "import matplotlib; print(matplotlib._get_data_path())")
17
18 datas = [
19 (mpl_data_dir, "matplotlib/mpl-data"),
20 ]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py
--- a/PyInstaller/hooks/hook-matplotlib.py
+++ b/PyInstaller/hooks/hook-matplotlib.py
@@ -13,7 +13,7 @@
from PyInstaller.utils.hooks import exec_statement
mpl_data_dir = exec_statement(
- "import matplotlib; print(matplotlib._get_data_path())")
+ "import matplotlib; print(matplotlib.get_data_path())")
datas = [
(mpl_data_dir, "matplotlib/mpl-data"),
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py\n--- a/PyInstaller/hooks/hook-matplotlib.py\n+++ b/PyInstaller/hooks/hook-matplotlib.py\n@@ -13,7 +13,7 @@\n from PyInstaller.utils.hooks import exec_statement\n \n mpl_data_dir = exec_statement(\n- \"import matplotlib; print(matplotlib._get_data_path())\")\n+ \"import matplotlib; print(matplotlib.get_data_path())\")\n \n datas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n", "issue": "Support matplotlib-3.4.0rc1\n## Description of the issue\r\n`matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b.\r\nThis is on schedule for the removal of the deprecations that occurred around the time of #5006. The missing function leads to a traceback output during build, and the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`.\r\n## Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.2```\r\n* Version of Python: `3.8`\r\n* Platform: `Windows`\r\n* Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug`\r\n\r\n\r\n> * try the latest development version, using the following command: \r\n> \r\n> ```shell\r\n> pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\n> ```\r\n> \r\n> * follow *all* the instructions in our \"If Things Go Wrong\" Guide\r\n> (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and\r\n> \r\n> ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)\r\n> \r\n> * [ ] start with clean installation\r\n> * [ ] use the latest development version\r\n> * [ ] Run your frozen program **from a command window (shell)** \u2014 instead of double-clicking on it\r\n> * [ ] Package your program in **--onedir mode**\r\n> * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file\r\n> * [ ] Repackage you application in **verbose/debug mode**. 
For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.\r\n> \r\nThis part of the template is irrelevant, as the responsible code is unchanged on `develop`\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\npip install --pre matplotlib==3.4.0rc1 pyinstaller\r\necho \"import matplotlib\" > test.py\r\npyinstaller test.py\r\n```\r\n\r\n### Stacktrace / full error message\r\nBuilding `pyinstaller test.py`:\r\n```\r\n20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'...\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nAttributeError: module 'matplotlib' has no attribute '_get_data_path'\r\n\r\n```\r\nRunning `test.exe`:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 1, in <module>\r\n import matplotlib\r\n File \"<frozen importlib._bootstrap>\", line 991, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 975, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 671, in _load_unlocked\r\n File \"PyInstaller\\loader\\pyimod03_importers.py\", line 531, in exec_module\r\n File \"matplotlib\\__init__.py\", line 820, in <module>\r\n File \"matplotlib\\__init__.py\", line 725, in _rc_params_in_file\r\n File \"contextlib.py\", line 113, in __enter__\r\n File \"matplotlib\\__init__.py\", line 703, in _open_file_or_url\r\nFileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\\\matplotlib\\\\mpl-data\\\\matplotlibrc'\r\n[2688] Failed to execute script test\r\n\r\n```\r\n\r\n## Possible resolution\r\n\r\nSimply remove the first underscore in `matplotlib._get_data_path` in \r\n\r\nhttps://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16\r\n\r\nThis works on my system.\r\n\r\nI'm a little confused as to why the private function was being used in the first place. 
`matplotlib.get_data_path` has been available for some time.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib.get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}]} | 1,449 | 126 |
gh_patches_debug_22927 | rasdani/github-patches | git_diff | beeware__toga-1193 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImageView only works with square images
I created a hello world app following this tutorial under Linux Mint 20: [https://docs.beeware.org/en/latest/tutorial/tutorial-0.html](https://docs.beeware.org/en/latest/tutorial/tutorial-0.html)
My python version is 3.8.5
When trying to add images some work and some not.
This code doesn't work:
`image_from_url = toga.Image("https://dummyimage.com/100x67/000/fff")
imageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=67))`
And this is working fine:
`image_from_url = toga.Image("https://dummyimage.com/100x100/000/fff")
imageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=100))`
The error I get is:
> (__main__.py:67130): GdkPixbuf-CRITICAL **: 16:12:00.644: gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed
> ... TypeError: Argument 0 does not allow None as a value
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gtk/toga_gtk/widgets/imageview.py`
Content:
```
1 from ..libs import GdkPixbuf, Gtk, Gdk
2 from .base import Widget
3
4
5 class ImageView(Widget):
6 def create(self):
7 self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
8 self._image = Gtk.Image()
9 self._pixbuf = None
10 self.native.add(self._image)
11 self.native.interface = self.interface
12
13 def set_image(self, image):
14 self._pixbuf = image._impl.native
15
16 def set_bounds(self, x, y, width, height):
17 super().set_bounds(x, y, width, height)
18 # rehint to update scaling of pixbuf
19 self.rehint()
20
21 def rehint(self):
22 if self._pixbuf:
23 height, width = self._resize_max(
24 original_height=self._pixbuf.get_height(),
25 original_width=self._pixbuf.get_width(),
26 max_height=self.native.get_allocated_height(),
27 max_width=self.native.get_allocated_width(),
28 )
29
30 dpr = self.native.get_scale_factor()
31
32 scaled_pixbuf = self._pixbuf.scale_simple(
33 width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR
34 )
35
36 surface = Gdk.cairo_surface_create_from_pixbuf(
37 scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window
38 )
39 self._image.set_from_surface(surface)
40
41 @staticmethod
42 def _resize_max(original_height, original_width, max_height, max_width):
43
44 # Check to make sure all dimensions have valid sizes
45 if min(original_height, original_width, max_height, max_width) <= 0:
46 return 1, 1
47
48 width_ratio = max_width / original_width
49 height_ratio = max_height / original_height
50
51 height = original_height * width_ratio
52 if height <= max_height:
53 width = original_width * width_ratio
54 else:
55 height = original_height * height_ratio
56 width = original_width * height_ratio
57
58 return int(height), int(width)
59
```
Path: `examples/imageview/imageview/app.py`
Content:
```
1 import toga
2 from toga.style.pack import CENTER, COLUMN
3
4
5 class ImageViewApp(toga.App):
6 def startup(self):
7 self.main_window = toga.MainWindow(title=self.name)
8
9 box = toga.Box()
10 box.style.padding = 40
11 box.style.update(alignment=CENTER)
12 box.style.update(direction=COLUMN)
13
14 # image from local path
15 # load brutus.png from the package
16 # We set the style width/height parameters for this one
17 image_from_path = toga.Image('resources/brutus.png')
18 imageview_from_path = toga.ImageView(image_from_path)
19 imageview_from_path.style.update(height=72)
20 imageview_from_path.style.update(width=72)
21 box.add(imageview_from_path)
22
23 # image from remote URL
24 # no style parameters - we let Pack determine how to allocate
25 # the space
26 image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')
27 imageview_from_url = toga.ImageView(image_from_url)
28 box.add(imageview_from_url)
29
30 self.main_window.content = box
31 self.main_window.show()
32
33
34 def main():
35 return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')
36
37
38 if __name__ == '__main__':
39 app = main()
40 app.main_loop()
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py
--- a/examples/imageview/imageview/app.py
+++ b/examples/imageview/imageview/app.py
@@ -14,10 +14,9 @@
# image from local path
# load brutus.png from the package
# We set the style width/height parameters for this one
- image_from_path = toga.Image('resources/brutus.png')
+ image_from_path = toga.Image('resources/pride-brutus.png')
imageview_from_path = toga.ImageView(image_from_path)
imageview_from_path.style.update(height=72)
- imageview_from_path.style.update(width=72)
box.add(imageview_from_path)
# image from remote URL
diff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py
--- a/src/gtk/toga_gtk/widgets/imageview.py
+++ b/src/gtk/toga_gtk/widgets/imageview.py
@@ -55,4 +55,8 @@
height = original_height * height_ratio
width = original_width * height_ratio
- return int(height), int(width)
+ # On the first display the allocated height/width will be 1x1.
+ # If the image isn't square, this will result in one of the dimensions
+ # scaling to 0, which breaks GTK. So; constraint the minimum height
+ # and width to 1.
+ return max(int(height), 1), max(int(width), 1)
| {"golden_diff": "diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py\n--- a/examples/imageview/imageview/app.py\n+++ b/examples/imageview/imageview/app.py\n@@ -14,10 +14,9 @@\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n- image_from_path = toga.Image('resources/brutus.png')\n+ image_from_path = toga.Image('resources/pride-brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n- imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n \n # image from remote URL\ndiff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py\n--- a/src/gtk/toga_gtk/widgets/imageview.py\n+++ b/src/gtk/toga_gtk/widgets/imageview.py\n@@ -55,4 +55,8 @@\n height = original_height * height_ratio\n width = original_width * height_ratio\n \n- return int(height), int(width)\n+ # On the first display the allocated height/width will be 1x1.\n+ # If the image isn't square, this will result in one of the dimensions\n+ # scaling to 0, which breaks GTK. So; constraint the minimum height\n+ # and width to 1.\n+ return max(int(height), 1), max(int(width), 1)\n", "issue": "ImageView only works with square images\nI created a hello world app following this tutorial under Linux Mint 20: [https://docs.beeware.org/en/latest/tutorial/tutorial-0.html](https://docs.beeware.org/en/latest/tutorial/tutorial-0.html)\r\nMy python version is 3.8.5\r\nWhen trying to add images some work and some not.\r\n\r\nThis code doesn't work:\r\n`image_from_url = toga.Image(\"https://dummyimage.com/100x67/000/fff\")\r\nimageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=67))`\r\n\r\nAnd this is working fine:\r\n`image_from_url = toga.Image(\"https://dummyimage.com/100x100/000/fff\")\r\nimageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=100))`\r\n\r\nThe error I get is:\r\n\r\n> (__main__.py:67130): GdkPixbuf-CRITICAL **: 16:12:00.644: gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed\r\n> ... 
TypeError: Argument 0 does not allow None as a value\r\n\n", "before_files": [{"content": "from ..libs import GdkPixbuf, Gtk, Gdk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def set_bounds(self, x, y, width, height):\n super().set_bounds(x, y, width, height)\n # rehint to update scaling of pixbuf\n self.rehint()\n\n def rehint(self):\n if self._pixbuf:\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width(),\n )\n\n dpr = self.native.get_scale_factor()\n\n scaled_pixbuf = self._pixbuf.scale_simple(\n width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n )\n\n surface = Gdk.cairo_surface_create_from_pixbuf(\n scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n )\n self._image.set_from_surface(surface)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width / original_width\n height_ratio = max_height / original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n return int(height), int(width)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}, {"content": "import toga\nfrom toga.style.pack import CENTER, COLUMN\n\n\nclass ImageViewApp(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(title=self.name)\n\n box = toga.Box()\n box.style.padding = 40\n box.style.update(alignment=CENTER)\n box.style.update(direction=COLUMN)\n\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n image_from_path = toga.Image('resources/brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n\n # image from remote URL\n # no style parameters - we let Pack determine how to allocate\n # the space\n image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')\n imageview_from_url = toga.ImageView(image_from_url)\n box.add(imageview_from_url)\n\n self.main_window.content = box\n self.main_window.show()\n\n\ndef main():\n return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/imageview/imageview/app.py"}], "after_files": [{"content": "from ..libs import GdkPixbuf, Gtk, Gdk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def set_bounds(self, x, y, width, height):\n super().set_bounds(x, y, width, height)\n # rehint to update scaling of pixbuf\n self.rehint()\n\n def 
rehint(self):\n if self._pixbuf:\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width(),\n )\n\n dpr = self.native.get_scale_factor()\n\n scaled_pixbuf = self._pixbuf.scale_simple(\n width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n )\n\n surface = Gdk.cairo_surface_create_from_pixbuf(\n scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n )\n self._image.set_from_surface(surface)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width / original_width\n height_ratio = max_height / original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n # On the first display the allocated height/width will be 1x1.\n # If the image isn't square, this will result in one of the dimensions\n # scaling to 0, which breaks GTK. So; constraint the minimum height\n # and width to 1.\n return max(int(height), 1), max(int(width), 1)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}, {"content": "import toga\nfrom toga.style.pack import CENTER, COLUMN\n\n\nclass ImageViewApp(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(title=self.name)\n\n box = toga.Box()\n box.style.padding = 40\n box.style.update(alignment=CENTER)\n box.style.update(direction=COLUMN)\n\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n image_from_path = toga.Image('resources/pride-brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n box.add(imageview_from_path)\n\n # image from remote URL\n # no style parameters - we let Pack determine how to allocate\n # the space\n image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')\n imageview_from_url = toga.ImageView(image_from_url)\n box.add(imageview_from_url)\n\n self.main_window.content = box\n self.main_window.show()\n\n\ndef main():\n return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/imageview/imageview/app.py"}]} | 1,472 | 351 |
gh_patches_debug_40167 | rasdani/github-patches | git_diff | mosaicml__composer-534 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect setting of persistent_workers hparam
DataloaderHparams:
`persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once", default=True)`
This makes it sound like the default option which is True shuts down the workers after the dataset has been consumed once. But when calling torch Dataloader, the default is False and this option keeps the workers alive.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `composer/datasets/dataloader.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from __future__ import annotations
4
5 import logging
6 import textwrap
7 from dataclasses import dataclass
8 from typing import Any, Callable, Iterator, Optional
9
10 import torch
11 import torch.distributed
12 import torch.utils.data
13 import yahp as hp
14
15 from composer.core.types import Batch, DataLoader, Dataset
16
17 log = logging.getLogger(__name__)
18
19
20 class WrappedDataLoader(DataLoader):
21
22 def __init__(self, dataloader: DataLoader) -> None:
23 if self.is_dataloader_already_wrapped(dataloader):
24 log.debug(
25 textwrap.dedent("""\
26 The dataloader is already wrapped with %s; it will be wrapped again.
27 If this is unintended behavior, guard the wrapping of the dataloader i.e. with:
28 if not %s.is_dataloader_already_wrapped(dataloader): dataloader = %s(dataloader)"""),
29 type(self).__name__,
30 type(self).__name__,
31 type(self).__name__,
32 )
33 self.dataset = dataloader.dataset
34 self.batch_size = dataloader.batch_size
35 self.num_workers = dataloader.num_workers
36 self.pin_memory = dataloader.pin_memory
37 self.drop_last = dataloader.drop_last
38 self.timeout = dataloader.timeout
39 self.sampler = dataloader.sampler
40 self.prefetch_factor = dataloader.prefetch_factor
41 self.dataloader = dataloader
42
43 def __len__(self) -> int:
44 return len(self.dataloader)
45
46 def __iter__(self) -> Iterator[Batch]:
47 return iter(self.dataloader)
48
49 def __bool__(self) -> bool:
50 return True
51
52 def __setattr__(self, name: str, value: Any) -> None:
53 if hasattr(self, name) and name in ("dataset", "batch_size", "num_workers", "pin_memory", "drop_last",
54 "timeout", "sampler", "prefetch_factor", "dataloader"):
55 raise RuntimeError(f"Property {name} cannot be set after initialization in a DataLoader")
56 return super().__setattr__(name, value)
57
58 @classmethod
59 def is_dataloader_already_wrapped(cls, dataloader: DataLoader):
60 """Returns whether the ``dataloader`` is wrapped with ``cls``. This helper method checks recursively through all
61 wrappings until the underlying dataloader is reached.
62
63 Args:
64 dataloader (DataLoader): The dataloader to check
65
66 Returns:
67 bool: Whether the ``dataloader`` is wrapped recursively with ``cls``.
68 """
69 if isinstance(dataloader, cls):
70 return True
71 if not isinstance(dataloader, WrappedDataLoader):
72 return False
73 if not isinstance(dataloader.dataloader, WrappedDataLoader):
74 return False
75 return cls.is_dataloader_already_wrapped(dataloader.dataloader)
76
77
78 def unwrap_data_loader(dataloader: DataLoader) -> DataLoader:
79 """Recursively unwraps a dataloader if it is of type :class:`WrappedDataLoader`.
80
81 Args:
82 dataloader (DataLoader): The dataloader to unwrap
83
84 Returns:
85 DataLoader: The underlying dataloader
86 """
87 if isinstance(dataloader, WrappedDataLoader):
88 return unwrap_data_loader(dataloader.dataloader)
89 return dataloader
90
91
92 @dataclass
93 class DataloaderHparams(hp.Hparams):
94 """Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.
95
96 Parameters:
97 num_workers (int): Number of CPU workers to use per device to fetch data.
98 prefetch_factor (int): Number of samples loaded in advance by each worker.
99 2 means there will be a total of 2 * num_workers samples prefetched across all workers.
100 persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.
101 pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.
102 timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.
103 """
104
105 num_workers: int = hp.optional("Number of CPU workers to use per device to fetch data.", default=8)
106 prefetch_factor: int = hp.optional("Number of samples loaded in advance by each worker", default=2)
107 persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once",
108 default=True)
109 pin_memory: bool = hp.optional("Whether to copy Tensors into CUDA pinned memory before returning them",
110 default=True)
111 timeout: float = hp.optional("Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout",
112 default=0)
113
114 def initialize_object(
115 self,
116 dataset: Dataset,
117 *,
118 batch_size: int,
119 sampler: Optional[torch.utils.data.Sampler[int]],
120 drop_last: bool,
121 collate_fn: Optional[Callable] = None,
122 worker_init_fn: Optional[Callable] = None,
123 ) -> DataLoader:
124 """Create a dataloader.
125
126 Args:
127 dataset (Dataset): The dataset.
128 batch_size (int): The per-device batch size.
129 sampler (torch.utils.data.Sampler[int] or None): The sampler to use for the dataloader.
130 drop_last (bool): Whether to drop the last batch if the number of
131 samples is not evenly divisible by the batch size.
132 collate_fn (callable, optional): Custom collate function. Defaults to None.
133 worker_init_fn (callable, optional): Custom worker init function. Defaults to None.
134
135 Returns:
136 DataLoader: The dataloader.
137 """
138
139 return torch.utils.data.DataLoader(dataset,
140 batch_size=batch_size,
141 num_workers=self.num_workers,
142 pin_memory=self.pin_memory,
143 drop_last=drop_last,
144 sampler=sampler,
145 collate_fn=collate_fn,
146 worker_init_fn=worker_init_fn,
147 timeout=self.timeout,
148 prefetch_factor=self.prefetch_factor,
149 persistent_workers=self.persistent_workers)
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/composer/datasets/dataloader.py b/composer/datasets/dataloader.py
--- a/composer/datasets/dataloader.py
+++ b/composer/datasets/dataloader.py
@@ -94,22 +94,42 @@
"""Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.
Parameters:
- num_workers (int): Number of CPU workers to use per device to fetch data.
- prefetch_factor (int): Number of samples loaded in advance by each worker.
- 2 means there will be a total of 2 * num_workers samples prefetched across all workers.
- persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.
- pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.
- timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.
+ num_workers (int, optional): Number of CPU workers to use per device to fetch data.
+ Set to ``0`` to use the main training thread for dataloading.
+ While zero workers can be useful for debugging, it should not be used for performance reasons.
+ (default: ``8``)
+ prefetch_factor (int, optional): Number of samples loaded in advance by each worker.
+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.
+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.
+ (default: ``2``)
+ persistent_workers (bool): Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,
+ then this field must be ``False``. (default: ``True``)
+ pin_memory (bool, optional): Whether or not to copy Tensors into CUDA pinned memory before returning them.
+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``. (default: ``True``)
+ timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.
+ (default: ``0``)
"""
- num_workers: int = hp.optional("Number of CPU workers to use per device to fetch data.", default=8)
- prefetch_factor: int = hp.optional("Number of samples loaded in advance by each worker", default=2)
- persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once",
+ num_workers: int = hp.optional(textwrap.dedent("""\
+ Number of CPU workers to use per device to fetch data.
+ Set to ``0`` to use the main training thread for dataloading.
+ While zero workers can be useful for debugging, it should not be used for performance reasons."""),
+ default=8)
+ prefetch_factor: int = hp.optional(textwrap.dedent("""\
+ Number of samples loaded in advance by each worker.
+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.
+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value."""),
+ default=2)
+ persistent_workers: bool = hp.optional(textwrap.dedent("""\
+ Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,
+ then this field must be ``False``"""),
default=True)
- pin_memory: bool = hp.optional("Whether to copy Tensors into CUDA pinned memory before returning them",
+ pin_memory: bool = hp.optional(textwrap.dedent("""\
+ Whether or not to copy Tensors into CUDA pinned memory before returning them.
+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``."""),
default=True)
- timeout: float = hp.optional("Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout",
- default=0)
+ timeout: float = hp.optional(
+ "Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.", default=0)
def initialize_object(
self,
| {"golden_diff": "diff --git a/composer/datasets/dataloader.py b/composer/datasets/dataloader.py\n--- a/composer/datasets/dataloader.py\n+++ b/composer/datasets/dataloader.py\n@@ -94,22 +94,42 @@\n \"\"\"Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.\n \n Parameters:\n- num_workers (int): Number of CPU workers to use per device to fetch data.\n- prefetch_factor (int): Number of samples loaded in advance by each worker.\n- 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n- persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.\n- pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n- timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.\n+ num_workers (int, optional): Number of CPU workers to use per device to fetch data.\n+ Set to ``0`` to use the main training thread for dataloading.\n+ While zero workers can be useful for debugging, it should not be used for performance reasons.\n+ (default: ``8``)\n+ prefetch_factor (int, optional): Number of samples loaded in advance by each worker.\n+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\n+ (default: ``2``)\n+ persistent_workers (bool): Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n+ then this field must be ``False``. (default: ``True``)\n+ pin_memory (bool, optional): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``. (default: ``True``)\n+ timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.\n+ (default: ``0``)\n \"\"\"\n \n- num_workers: int = hp.optional(\"Number of CPU workers to use per device to fetch data.\", default=8)\n- prefetch_factor: int = hp.optional(\"Number of samples loaded in advance by each worker\", default=2)\n- persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\",\n+ num_workers: int = hp.optional(textwrap.dedent(\"\"\"\\\n+ Number of CPU workers to use per device to fetch data.\n+ Set to ``0`` to use the main training thread for dataloading.\n+ While zero workers can be useful for debugging, it should not be used for performance reasons.\"\"\"),\n+ default=8)\n+ prefetch_factor: int = hp.optional(textwrap.dedent(\"\"\"\\\n+ Number of samples loaded in advance by each worker.\n+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\"\"\"),\n+ default=2)\n+ persistent_workers: bool = hp.optional(textwrap.dedent(\"\"\"\\\n+ Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n+ then this field must be ``False``\"\"\"),\n default=True)\n- pin_memory: bool = hp.optional(\"Whether to copy Tensors into CUDA pinned memory before returning them\",\n+ pin_memory: bool = hp.optional(textwrap.dedent(\"\"\"\\\n+ Whether or not to copy Tensors into CUDA pinned memory before returning them.\n+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``.\"\"\"),\n default=True)\n- timeout: float = hp.optional(\"Timeout, in seconds, for collecting a batch from workers. 
Set to 0 for no timeout\",\n- default=0)\n+ timeout: float = hp.optional(\n+ \"Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.\", default=0)\n \n def initialize_object(\n self,\n", "issue": "Incorrect setting of persistent_workers hparam\nDataloaderHparams:\r\n`persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\", default=True)`\r\n\r\nThis makes it sound like the default option which is True shuts down the workers after the dataset has been consumed once. But when calling torch Dataloader, the default is False and this option keeps the workers alive.\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport logging\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Iterator, Optional\n\nimport torch\nimport torch.distributed\nimport torch.utils.data\nimport yahp as hp\n\nfrom composer.core.types import Batch, DataLoader, Dataset\n\nlog = logging.getLogger(__name__)\n\n\nclass WrappedDataLoader(DataLoader):\n\n def __init__(self, dataloader: DataLoader) -> None:\n if self.is_dataloader_already_wrapped(dataloader):\n log.debug(\n textwrap.dedent(\"\"\"\\\n The dataloader is already wrapped with %s; it will be wrapped again.\n If this is unintended behavior, guard the wrapping of the dataloader i.e. with:\n if not %s.is_dataloader_already_wrapped(dataloader): dataloader = %s(dataloader)\"\"\"),\n type(self).__name__,\n type(self).__name__,\n type(self).__name__,\n )\n self.dataset = dataloader.dataset\n self.batch_size = dataloader.batch_size\n self.num_workers = dataloader.num_workers\n self.pin_memory = dataloader.pin_memory\n self.drop_last = dataloader.drop_last\n self.timeout = dataloader.timeout\n self.sampler = dataloader.sampler\n self.prefetch_factor = dataloader.prefetch_factor\n self.dataloader = dataloader\n\n def __len__(self) -> int:\n return len(self.dataloader)\n\n def __iter__(self) -> Iterator[Batch]:\n return iter(self.dataloader)\n\n def __bool__(self) -> bool:\n return True\n\n def __setattr__(self, name: str, value: Any) -> None:\n if hasattr(self, name) and name in (\"dataset\", \"batch_size\", \"num_workers\", \"pin_memory\", \"drop_last\",\n \"timeout\", \"sampler\", \"prefetch_factor\", \"dataloader\"):\n raise RuntimeError(f\"Property {name} cannot be set after initialization in a DataLoader\")\n return super().__setattr__(name, value)\n\n @classmethod\n def is_dataloader_already_wrapped(cls, dataloader: DataLoader):\n \"\"\"Returns whether the ``dataloader`` is wrapped with ``cls``. 
This helper method checks recursively through all\n wrappings until the underlying dataloader is reached.\n\n Args:\n dataloader (DataLoader): The dataloader to check\n\n Returns:\n bool: Whether the ``dataloader`` is wrapped recursively with ``cls``.\n \"\"\"\n if isinstance(dataloader, cls):\n return True\n if not isinstance(dataloader, WrappedDataLoader):\n return False\n if not isinstance(dataloader.dataloader, WrappedDataLoader):\n return False\n return cls.is_dataloader_already_wrapped(dataloader.dataloader)\n\n\ndef unwrap_data_loader(dataloader: DataLoader) -> DataLoader:\n \"\"\"Recursively unwraps a dataloader if it is of type :class:`WrappedDataLoader`.\n\n Args:\n dataloader (DataLoader): The dataloader to unwrap\n\n Returns:\n DataLoader: The underlying dataloader\n \"\"\"\n if isinstance(dataloader, WrappedDataLoader):\n return unwrap_data_loader(dataloader.dataloader)\n return dataloader\n\n\n@dataclass\nclass DataloaderHparams(hp.Hparams):\n \"\"\"Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.\n\n Parameters:\n num_workers (int): Number of CPU workers to use per device to fetch data.\n prefetch_factor (int): Number of samples loaded in advance by each worker.\n 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.\n pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.\n \"\"\"\n\n num_workers: int = hp.optional(\"Number of CPU workers to use per device to fetch data.\", default=8)\n prefetch_factor: int = hp.optional(\"Number of samples loaded in advance by each worker\", default=2)\n persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\",\n default=True)\n pin_memory: bool = hp.optional(\"Whether to copy Tensors into CUDA pinned memory before returning them\",\n default=True)\n timeout: float = hp.optional(\"Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout\",\n default=0)\n\n def initialize_object(\n self,\n dataset: Dataset,\n *,\n batch_size: int,\n sampler: Optional[torch.utils.data.Sampler[int]],\n drop_last: bool,\n collate_fn: Optional[Callable] = None,\n worker_init_fn: Optional[Callable] = None,\n ) -> DataLoader:\n \"\"\"Create a dataloader.\n\n Args:\n dataset (Dataset): The dataset.\n batch_size (int): The per-device batch size.\n sampler (torch.utils.data.Sampler[int] or None): The sampler to use for the dataloader.\n drop_last (bool): Whether to drop the last batch if the number of\n samples is not evenly divisible by the batch size.\n collate_fn (callable, optional): Custom collate function. Defaults to None.\n worker_init_fn (callable, optional): Custom worker init function. Defaults to None.\n\n Returns:\n DataLoader: The dataloader.\n \"\"\"\n\n return torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=drop_last,\n sampler=sampler,\n collate_fn=collate_fn,\n worker_init_fn=worker_init_fn,\n timeout=self.timeout,\n prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers)\n", "path": "composer/datasets/dataloader.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport logging\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Iterator, Optional\n\nimport torch\nimport torch.distributed\nimport torch.utils.data\nimport yahp as hp\n\nfrom composer.core.types import Batch, DataLoader, Dataset\n\nlog = logging.getLogger(__name__)\n\n\nclass WrappedDataLoader(DataLoader):\n\n def __init__(self, dataloader: DataLoader) -> None:\n if self.is_dataloader_already_wrapped(dataloader):\n log.debug(\n textwrap.dedent(\"\"\"\\\n The dataloader is already wrapped with %s; it will be wrapped again.\n If this is unintended behavior, guard the wrapping of the dataloader i.e. with:\n if not %s.is_dataloader_already_wrapped(dataloader): dataloader = %s(dataloader)\"\"\"),\n type(self).__name__,\n type(self).__name__,\n type(self).__name__,\n )\n self.dataset = dataloader.dataset\n self.batch_size = dataloader.batch_size\n self.num_workers = dataloader.num_workers\n self.pin_memory = dataloader.pin_memory\n self.drop_last = dataloader.drop_last\n self.timeout = dataloader.timeout\n self.sampler = dataloader.sampler\n self.prefetch_factor = dataloader.prefetch_factor\n self.dataloader = dataloader\n\n def __len__(self) -> int:\n return len(self.dataloader)\n\n def __iter__(self) -> Iterator[Batch]:\n return iter(self.dataloader)\n\n def __bool__(self) -> bool:\n return True\n\n def __setattr__(self, name: str, value: Any) -> None:\n if hasattr(self, name) and name in (\"dataset\", \"batch_size\", \"num_workers\", \"pin_memory\", \"drop_last\",\n \"timeout\", \"sampler\", \"prefetch_factor\", \"dataloader\"):\n raise RuntimeError(f\"Property {name} cannot be set after initialization in a DataLoader\")\n return super().__setattr__(name, value)\n\n @classmethod\n def is_dataloader_already_wrapped(cls, dataloader: DataLoader):\n \"\"\"Returns whether the ``dataloader`` is wrapped with ``cls``. 
This helper method checks recursively through all\n wrappings until the underlying dataloader is reached.\n\n Args:\n dataloader (DataLoader): The dataloader to check\n\n Returns:\n bool: Whether the ``dataloader`` is wrapped recursively with ``cls``.\n \"\"\"\n if isinstance(dataloader, cls):\n return True\n if not isinstance(dataloader, WrappedDataLoader):\n return False\n if not isinstance(dataloader.dataloader, WrappedDataLoader):\n return False\n return cls.is_dataloader_already_wrapped(dataloader.dataloader)\n\n\ndef unwrap_data_loader(dataloader: DataLoader) -> DataLoader:\n \"\"\"Recursively unwraps a dataloader if it is of type :class:`WrappedDataLoader`.\n\n Args:\n dataloader (DataLoader): The dataloader to unwrap\n\n Returns:\n DataLoader: The underlying dataloader\n \"\"\"\n if isinstance(dataloader, WrappedDataLoader):\n return unwrap_data_loader(dataloader.dataloader)\n return dataloader\n\n\n@dataclass\nclass DataloaderHparams(hp.Hparams):\n \"\"\"Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.\n\n Parameters:\n num_workers (int, optional): Number of CPU workers to use per device to fetch data.\n Set to ``0`` to use the main training thread for dataloading.\n While zero workers can be useful for debugging, it should not be used for performance reasons.\n (default: ``8``)\n prefetch_factor (int, optional): Number of samples loaded in advance by each worker.\n For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\n (default: ``2``)\n persistent_workers (bool): Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n then this field must be ``False``. (default: ``True``)\n pin_memory (bool, optional): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n If ``num_workers = 0``, then the ``pin_memory`` must be ``False``. (default: ``True``)\n timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.\n (default: ``0``)\n \"\"\"\n\n num_workers: int = hp.optional(textwrap.dedent(\"\"\"\\\n Number of CPU workers to use per device to fetch data.\n Set to ``0`` to use the main training thread for dataloading.\n While zero workers can be useful for debugging, it should not be used for performance reasons.\"\"\"),\n default=8)\n prefetch_factor: int = hp.optional(textwrap.dedent(\"\"\"\\\n Number of samples loaded in advance by each worker.\n For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\"\"\"),\n default=2)\n persistent_workers: bool = hp.optional(textwrap.dedent(\"\"\"\\\n Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n then this field must be ``False``\"\"\"),\n default=True)\n pin_memory: bool = hp.optional(textwrap.dedent(\"\"\"\\\n Whether or not to copy Tensors into CUDA pinned memory before returning them.\n If ``num_workers = 0``, then the ``pin_memory`` must be ``False``.\"\"\"),\n default=True)\n timeout: float = hp.optional(\n \"Timeout, in seconds, for collecting a batch from workers. 
Set to ``0`` for no timeout.\", default=0)\n\n def initialize_object(\n self,\n dataset: Dataset,\n *,\n batch_size: int,\n sampler: Optional[torch.utils.data.Sampler[int]],\n drop_last: bool,\n collate_fn: Optional[Callable] = None,\n worker_init_fn: Optional[Callable] = None,\n ) -> DataLoader:\n \"\"\"Create a dataloader.\n\n Args:\n dataset (Dataset): The dataset.\n batch_size (int): The per-device batch size.\n sampler (torch.utils.data.Sampler[int] or None): The sampler to use for the dataloader.\n drop_last (bool): Whether to drop the last batch if the number of\n samples is not evenly divisible by the batch size.\n collate_fn (callable, optional): Custom collate function. Defaults to None.\n worker_init_fn (callable, optional): Custom worker init function. Defaults to None.\n\n Returns:\n DataLoader: The dataloader.\n \"\"\"\n\n return torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=drop_last,\n sampler=sampler,\n collate_fn=collate_fn,\n worker_init_fn=worker_init_fn,\n timeout=self.timeout,\n prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers)\n", "path": "composer/datasets/dataloader.py"}]} | 1,975 | 934 |