Column schema (name, type, value statistics):

problem_id          string  (lengths 18 to 22)
source              string  (1 distinct value)
task_type           string  (1 distinct value)
in_source_id        string  (lengths 13 to 58)
prompt              string  (lengths 1.71k to 9.01k)
golden_diff         string  (lengths 151 to 4.94k)
verification_info   string  (lengths 465 to 11.3k)
num_tokens_prompt   int64   (557 to 2.05k)
num_tokens_diff     int64   (48 to 1.02k)
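Below is a minimal sketch of how records with this schema might be loaded and inspected with the Hugging Face `datasets` library. The dataset identifier used here is taken from the `source` column ("rasdani/github-patches") and is an assumption; the actual hub ID of this derived dump may differ.

```python
# Minimal sketch: loading and inspecting records that follow the schema above.
# Assumption: the dump is published on the Hugging Face Hub under the ID taken
# from the `source` column; substitute the real repository ID if it differs.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

example = ds[0]
print(example["problem_id"])         # e.g. "gh_patches_debug_2654"
print(example["in_source_id"])       # e.g. "microsoft__nni-5155"
print(example["num_tokens_prompt"])  # prompt length in tokens (int64)

# The golden_diff field holds a unified diff intended to be applied with `git apply`.
print(example["golden_diff"][:200])
```

Each record pairs an issue statement plus partial code base (`prompt`) with the reference patch (`golden_diff`) and a JSON blob of the same material for verification (`verification_info`); the two token-count columns give the tokenized sizes of the prompt and the diff.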
gh_patches_debug_2654
rasdani/github-patches
git_diff
microsoft__nni-5155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unclear what extras to install: `import nni.retiarii.execution.api` fails due to missing `pytorch_lightning` **Describe the issue**: I want to use `nni.retiarii.execution.api` module. I've installed it as below: ``` Collecting nni>=2.3 Downloading nni-2.9-py3-none-manylinux1_x86_64.whl (56.0 MB) ``` **Environment**: - NNI version: 2.9 - Python version: 3.8 **Log message**: ``` _________________ ERROR collecting test/3rd_party/test_nni.py __________________ ImportError while importing test module '/__w/ai4cl-tianshou/ai4cl-tianshou/test/3rd_party/test_nni.py'. Hint: make sure your test modules/packages have valid Python names. Traceback: /usr/local/lib/python3.8/importlib/__init__.py:127: in import_module return _bootstrap._gcd_import(name[level:], package, level) test/3rd_party/test_nni.py:8: in <module> import nni.retiarii.execution.api /usr/local/lib/python3.8/site-packages/nni/retiarii/__init__.py:4: in <module> from .operation import Operation /usr/local/lib/python3.8/site-packages/nni/retiarii/operation.py:6: in <module> from nni.nas.execution.common.graph_op import * /usr/local/lib/python3.8/site-packages/nni/nas/__init__.py:4: in <module> from .execution import * /usr/local/lib/python3.8/site-packages/nni/nas/execution/__init__.py:4: in <module> from .api import * /usr/local/lib/python3.8/site-packages/nni/nas/execution/api.py:9: in <module> from nni.nas.execution.common import ( /usr/local/lib/python3.8/site-packages/nni/nas/execution/common/__init__.py:4: in <module> from .engine import * /usr/local/lib/python3.8/site-packages/nni/nas/execution/common/engine.py:7: in <module> from .graph import Model, MetricData /usr/local/lib/python3.8/site-packages/nni/nas/execution/common/graph.py:18: in <module> from nni.nas.evaluator import Evaluator /usr/local/lib/python3.8/site-packages/nni/nas/evaluator/__init__.py:9: in <module> shortcut_framework(__name__) /usr/local/lib/python3.8/site-packages/nni/common/framework.py:93: in shortcut_framework shortcut_module(current, '.' + get_default_framework(), current) /usr/local/lib/python3.8/site-packages/nni/common/framework.py:83: in shortcut_module mod = importlib.import_module(target, package) /usr/local/lib/python3.8/importlib/__init__.py:127: in import_module return _bootstrap._gcd_import(name[level:], package, level) /usr/local/lib/python3.8/site-packages/nni/nas/evaluator/pytorch/__init__.py:4: in <module> from .lightning import * /usr/local/lib/python3.8/site-packages/nni/nas/evaluator/pytorch/lightning.py:10: in <module> import pytorch_lightning as pl E ModuleNotFoundError: No module named 'pytorch_lightning' ``` **How to reproduce it?**: ``` pip install nni==2.9 python -c "import nni.retiarii.execution.api" ``` </issue> <code> [start of nni/nas/evaluator/pytorch/__init__.py] 1 # Copyright (c) Microsoft Corporation. 2 # Licensed under the MIT license. 3 4 from .lightning import * 5 [end of nni/nas/evaluator/pytorch/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nni/nas/evaluator/pytorch/__init__.py b/nni/nas/evaluator/pytorch/__init__.py --- a/nni/nas/evaluator/pytorch/__init__.py +++ b/nni/nas/evaluator/pytorch/__init__.py @@ -1,4 +1,11 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. -from .lightning import * +import warnings + +try: + from .lightning import * +except ImportError: + warnings.warn("PyTorch-Lightning must be installed to use PyTorch in NAS. " + "If you are not using PyTorch, please `nni.set_default_framework('none')`") + raise
{"golden_diff": "diff --git a/nni/nas/evaluator/pytorch/__init__.py b/nni/nas/evaluator/pytorch/__init__.py\n--- a/nni/nas/evaluator/pytorch/__init__.py\n+++ b/nni/nas/evaluator/pytorch/__init__.py\n@@ -1,4 +1,11 @@\n # Copyright (c) Microsoft Corporation.\n # Licensed under the MIT license.\n \n-from .lightning import *\n+import warnings\n+\n+try:\n+ from .lightning import *\n+except ImportError:\n+ warnings.warn(\"PyTorch-Lightning must be installed to use PyTorch in NAS. \"\n+ \"If you are not using PyTorch, please `nni.set_default_framework('none')`\")\n+ raise\n", "issue": "Unclear what extras to install: `import nni.retiarii.execution.api` fails due to missing `pytorch_lightning`\n**Describe the issue**:\r\nI want to use `nni.retiarii.execution.api` module. I've installed it as below:\r\n```\r\nCollecting nni>=2.3\r\n Downloading nni-2.9-py3-none-manylinux1_x86_64.whl (56.0 MB)\r\n```\r\n\r\n**Environment**:\r\n- NNI version: 2.9\r\n- Python version: 3.8\r\n\r\n**Log message**:\r\n```\r\n_________________ ERROR collecting test/3rd_party/test_nni.py __________________\r\nImportError while importing test module '/__w/ai4cl-tianshou/ai4cl-tianshou/test/3rd_party/test_nni.py'.\r\nHint: make sure your test modules/packages have valid Python names.\r\nTraceback:\r\n/usr/local/lib/python3.8/importlib/__init__.py:127: in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\ntest/3rd_party/test_nni.py:8: in <module>\r\n import nni.retiarii.execution.api\r\n/usr/local/lib/python3.8/site-packages/nni/retiarii/__init__.py:4: in <module>\r\n from .operation import Operation\r\n/usr/local/lib/python3.8/site-packages/nni/retiarii/operation.py:6: in <module>\r\n from nni.nas.execution.common.graph_op import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/__init__.py:4: in <module>\r\n from .execution import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/__init__.py:4: in <module>\r\n from .api import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/api.py:9: in <module>\r\n from nni.nas.execution.common import (\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/common/__init__.py:4: in <module>\r\n from .engine import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/common/engine.py:7: in <module>\r\n from .graph import Model, MetricData\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/common/graph.py:18: in <module>\r\n from nni.nas.evaluator import Evaluator\r\n/usr/local/lib/python3.8/site-packages/nni/nas/evaluator/__init__.py:9: in <module>\r\n shortcut_framework(__name__)\r\n/usr/local/lib/python3.8/site-packages/nni/common/framework.py:93: in shortcut_framework\r\n shortcut_module(current, '.' 
+ get_default_framework(), current)\r\n/usr/local/lib/python3.8/site-packages/nni/common/framework.py:83: in shortcut_module\r\n mod = importlib.import_module(target, package)\r\n/usr/local/lib/python3.8/importlib/__init__.py:127: in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n/usr/local/lib/python3.8/site-packages/nni/nas/evaluator/pytorch/__init__.py:4: in <module>\r\n from .lightning import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/evaluator/pytorch/lightning.py:10: in <module>\r\n import pytorch_lightning as pl\r\nE ModuleNotFoundError: No module named 'pytorch_lightning'\r\n```\r\n\r\n**How to reproduce it?**:\r\n```\r\npip install nni==2.9\r\npython -c \"import nni.retiarii.execution.api\"\r\n```\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nfrom .lightning import *\n", "path": "nni/nas/evaluator/pytorch/__init__.py"}]}
1,388
173
gh_patches_debug_3315
rasdani/github-patches
git_diff
google__turbinia-1227
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create documentation for the Turbinia API command line tool Write documentation for the new Turbinia API command line tool. </issue> <code> [start of turbinia/api/cli/turbinia_client/core/groups.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2022 Google Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Turbinia API client command-line tool.""" 16 17 import click 18 19 20 @click.group('config') 21 def config_group(): 22 """Get Turbinia configuration.""" 23 24 25 @click.group('status') 26 def status_group(): 27 """Get Turbinia request/task status.""" 28 29 30 @click.group('result') 31 def result_group(): 32 """Get Turbinia task or request results.""" 33 34 35 @click.group('jobs') 36 def jobs_group(): 37 """Get a list of enabled Turbinia jobs.""" 38 39 40 @click.group('submit') 41 def submit_group(): 42 """Submit new requests to the Turbinia API server.""" 43 [end of turbinia/api/cli/turbinia_client/core/groups.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/turbinia/api/cli/turbinia_client/core/groups.py b/turbinia/api/cli/turbinia_client/core/groups.py --- a/turbinia/api/cli/turbinia_client/core/groups.py +++ b/turbinia/api/cli/turbinia_client/core/groups.py @@ -24,12 +24,12 @@ @click.group('status') def status_group(): - """Get Turbinia request/task status.""" + """Get Turbinia request or task status.""" @click.group('result') def result_group(): - """Get Turbinia task or request results.""" + """Get Turbinia request or task results.""" @click.group('jobs')
{"golden_diff": "diff --git a/turbinia/api/cli/turbinia_client/core/groups.py b/turbinia/api/cli/turbinia_client/core/groups.py\n--- a/turbinia/api/cli/turbinia_client/core/groups.py\n+++ b/turbinia/api/cli/turbinia_client/core/groups.py\n@@ -24,12 +24,12 @@\n \n @click.group('status')\n def status_group():\n- \"\"\"Get Turbinia request/task status.\"\"\"\n+ \"\"\"Get Turbinia request or task status.\"\"\"\n \n \n @click.group('result')\n def result_group():\n- \"\"\"Get Turbinia task or request results.\"\"\"\n+ \"\"\"Get Turbinia request or task results.\"\"\"\n \n \n @click.group('jobs')\n", "issue": "Create documentation for the Turbinia API command line tool\nWrite documentation for the new Turbinia API command line tool.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Turbinia API client command-line tool.\"\"\"\n\nimport click\n\n\[email protected]('config')\ndef config_group():\n \"\"\"Get Turbinia configuration.\"\"\"\n\n\[email protected]('status')\ndef status_group():\n \"\"\"Get Turbinia request/task status.\"\"\"\n\n\[email protected]('result')\ndef result_group():\n \"\"\"Get Turbinia task or request results.\"\"\"\n\n\[email protected]('jobs')\ndef jobs_group():\n \"\"\"Get a list of enabled Turbinia jobs.\"\"\"\n\n\[email protected]('submit')\ndef submit_group():\n \"\"\"Submit new requests to the Turbinia API server.\"\"\"\n", "path": "turbinia/api/cli/turbinia_client/core/groups.py"}]}
920
159
gh_patches_debug_41518
rasdani/github-patches
git_diff
lutris__lutris-2373
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Nine: Lutris can't find Nine libraries A lot of changed before creating PR #2092 and merging it - biggest issue is commit 792c22176eff9e063b22d7b9700e2e9b79a11fae, which changes return val of iter_lib_folders() from lists to strings. I believe I used iter_lib_folders() in #2092 because I needed distinguish between lib32 and lib64 paths. I will take a look at this and try to fix it ASAP (this week). Original report: https://github.com/lutris/lutris/pull/2092#issuecomment-529362315 </issue> <code> [start of lutris/util/wine/nine.py] 1 """Gallium Nine helper module""" 2 import os 3 import shutil 4 5 from lutris.util import system 6 from lutris.runners.commands.wine import wineexec 7 8 class NineUnavailable(RuntimeError): 9 """Exception raised when Gallium Nine is not available""" 10 11 class NineManager: 12 """Utility class to install and manage Gallium Nine to a Wine prefix""" 13 14 nine_files = ("d3d9-nine.dll", "ninewinecfg.exe") 15 mesa_files = ("d3dadapter9.so.1",) 16 17 def __init__(self, prefix, arch): 18 self.prefix = prefix 19 self.wine_arch = arch 20 21 @staticmethod 22 def nine_is_supported(): 23 """Check if MESA is built with Gallium Nine state tracker support 24 25 basic check for presence of d3dadapter9 library in 'd3d' subdirectory 26 of system library directory 27 """ 28 for mesa_file in NineManager.mesa_files: 29 if not any([os.path.exists(os.path.join(lib[0], "d3d", mesa_file)) 30 for lib in system.LINUX_SYSTEM.iter_lib_folders()]): 31 return False 32 33 if system.LINUX_SYSTEM.is_64_bit: 34 if not any([os.path.exists(os.path.join(lib[1], "d3d", mesa_file)) 35 for lib in system.LINUX_SYSTEM.iter_lib_folders()]): 36 return False 37 38 return True 39 40 @staticmethod 41 def nine_is_installed(): 42 """Check if Gallium Nine standalone is installed on this system 43 44 check 'wine/fakedlls' subdirectory of system library directory for Nine binaries 45 """ 46 for nine_file in NineManager.nine_files: 47 if not any([os.path.exists(os.path.join(lib[0], "wine/fakedlls", nine_file)) 48 for lib in system.LINUX_SYSTEM.iter_lib_folders()]): 49 return False 50 51 if system.LINUX_SYSTEM.is_64_bit: 52 if not any([os.path.exists(os.path.join(lib[1], "wine/fakedlls", nine_file)) 53 for lib in system.LINUX_SYSTEM.iter_lib_folders()]): 54 return False 55 56 return True 57 58 @staticmethod 59 def is_available(): 60 """Check if Gallium Nine can be enabled on this system""" 61 return NineManager.nine_is_supported() and NineManager.nine_is_installed() 62 63 def get_system_path(self, arch): 64 """Return path of Windows system directory with binaries of chosen architecture""" 65 windows_path = os.path.join(self.prefix, "drive_c/windows") 66 67 if self.wine_arch == "win32" and arch == "x32": 68 return os.path.join(windows_path, "system32") 69 if self.wine_arch == "win64" and arch == "x32": 70 return os.path.join(windows_path, "syswow64") 71 if self.wine_arch == "win64" and arch == "x64": 72 return os.path.join(windows_path, "system32") 73 74 return None 75 76 def is_prefix_prepared(self): 77 if not all(system.path_exists(os.path.join(self.get_system_path("x32"), nine_file)) 78 for nine_file in self.nine_files): 79 return False 80 81 if self.wine_arch == "win64": 82 if not all(system.path_exists(os.path.join(self.get_system_path("x64"), nine_file)) 83 for nine_file in self.nine_files): 84 return False 85 86 return True 87 88 def prepare_prefix(self): 89 for nine_file in NineManager.nine_files: 90 
for lib in system.LINUX_SYSTEM.iter_lib_folders(): 91 nine_file_32 = os.path.join(lib[0], "wine/fakedlls", nine_file) 92 if os.path.exists(nine_file_32): 93 shutil.copy(nine_file_32, self.get_system_path("x32")) 94 95 if self.wine_arch == "win64": 96 nine_file_64 = os.path.join(lib[1], "wine/fakedlls", nine_file) 97 if os.path.exists(nine_file_64): 98 shutil.copy(nine_file_64, self.get_system_path("x64")) 99 100 def enable(self): 101 if not self.nine_is_supported(): 102 raise NineUnavailable("Nine is not supported on this system") 103 if not self.nine_is_installed(): 104 raise NineUnavailable("Nine Standalone is not installed") 105 if not self.is_prefix_prepared(): 106 self.prepare_prefix() 107 108 wineexec( 109 "ninewinecfg", 110 args="-e", 111 prefix=self.prefix, 112 blocking=True, 113 ) 114 115 def disable(self): 116 if self.is_prefix_prepared(): 117 wineexec( 118 "ninewinecfg", 119 args="-d", 120 prefix=self.prefix, 121 blocking=True, 122 ) 123 [end of lutris/util/wine/nine.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/util/wine/nine.py b/lutris/util/wine/nine.py --- a/lutris/util/wine/nine.py +++ b/lutris/util/wine/nine.py @@ -4,6 +4,7 @@ from lutris.util import system from lutris.runners.commands.wine import wineexec +from lutris.util.wine.cabinstall import CabInstaller class NineUnavailable(RuntimeError): """Exception raised when Gallium Nine is not available""" @@ -26,15 +27,10 @@ of system library directory """ for mesa_file in NineManager.mesa_files: - if not any([os.path.exists(os.path.join(lib[0], "d3d", mesa_file)) + if not any([os.path.exists(os.path.join(lib, "d3d", mesa_file)) for lib in system.LINUX_SYSTEM.iter_lib_folders()]): return False - if system.LINUX_SYSTEM.is_64_bit: - if not any([os.path.exists(os.path.join(lib[1], "d3d", mesa_file)) - for lib in system.LINUX_SYSTEM.iter_lib_folders()]): - return False - return True @staticmethod @@ -44,15 +40,10 @@ check 'wine/fakedlls' subdirectory of system library directory for Nine binaries """ for nine_file in NineManager.nine_files: - if not any([os.path.exists(os.path.join(lib[0], "wine/fakedlls", nine_file)) + if not any([os.path.exists(os.path.join(lib, "wine/fakedlls", nine_file)) for lib in system.LINUX_SYSTEM.iter_lib_folders()]): return False - if system.LINUX_SYSTEM.is_64_bit: - if not any([os.path.exists(os.path.join(lib[1], "wine/fakedlls", nine_file)) - for lib in system.LINUX_SYSTEM.iter_lib_folders()]): - return False - return True @staticmethod @@ -88,15 +79,24 @@ def prepare_prefix(self): for nine_file in NineManager.nine_files: for lib in system.LINUX_SYSTEM.iter_lib_folders(): - nine_file_32 = os.path.join(lib[0], "wine/fakedlls", nine_file) - if os.path.exists(nine_file_32): - shutil.copy(nine_file_32, self.get_system_path("x32")) + nine_file_path = os.path.join(lib, "wine/fakedlls", nine_file) + + if (os.path.exists(nine_file_path) and + CabInstaller.get_arch_from_dll(nine_file_path) == "win32"): + shutil.copy(nine_file_path, self.get_system_path("x32")) if self.wine_arch == "win64": - nine_file_64 = os.path.join(lib[1], "wine/fakedlls", nine_file) - if os.path.exists(nine_file_64): + if (os.path.exists(nine_file_path) and + CabInstaller.get_arch_from_dll(nine_file_path) == "win64"): shutil.copy(nine_file_64, self.get_system_path("x64")) + if not os.path.exists(os.path.join(self.get_system_path("x32"), nine_file)): + raise NineUnavailable("could not install " + nine_file + " (x32)") + + if self.wine_arch == "win64": + if not os.path.exists(os.path.join(self.get_system_path("x64"), nine_file)): + raise NineUnavailable("could not install " + nine_file + " (x64)") + def enable(self): if not self.nine_is_supported(): raise NineUnavailable("Nine is not supported on this system")
{"golden_diff": "diff --git a/lutris/util/wine/nine.py b/lutris/util/wine/nine.py\n--- a/lutris/util/wine/nine.py\n+++ b/lutris/util/wine/nine.py\n@@ -4,6 +4,7 @@\n \n from lutris.util import system\n from lutris.runners.commands.wine import wineexec\n+from lutris.util.wine.cabinstall import CabInstaller\n \n class NineUnavailable(RuntimeError):\n \"\"\"Exception raised when Gallium Nine is not available\"\"\"\n@@ -26,15 +27,10 @@\n of system library directory\n \"\"\"\n for mesa_file in NineManager.mesa_files:\n- if not any([os.path.exists(os.path.join(lib[0], \"d3d\", mesa_file))\n+ if not any([os.path.exists(os.path.join(lib, \"d3d\", mesa_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n \n- if system.LINUX_SYSTEM.is_64_bit:\n- if not any([os.path.exists(os.path.join(lib[1], \"d3d\", mesa_file))\n- for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n- return False\n-\n return True\n \n @staticmethod\n@@ -44,15 +40,10 @@\n check 'wine/fakedlls' subdirectory of system library directory for Nine binaries\n \"\"\"\n for nine_file in NineManager.nine_files:\n- if not any([os.path.exists(os.path.join(lib[0], \"wine/fakedlls\", nine_file))\n+ if not any([os.path.exists(os.path.join(lib, \"wine/fakedlls\", nine_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n \n- if system.LINUX_SYSTEM.is_64_bit:\n- if not any([os.path.exists(os.path.join(lib[1], \"wine/fakedlls\", nine_file))\n- for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n- return False\n-\n return True\n \n @staticmethod\n@@ -88,15 +79,24 @@\n def prepare_prefix(self):\n for nine_file in NineManager.nine_files:\n for lib in system.LINUX_SYSTEM.iter_lib_folders():\n- nine_file_32 = os.path.join(lib[0], \"wine/fakedlls\", nine_file)\n- if os.path.exists(nine_file_32):\n- shutil.copy(nine_file_32, self.get_system_path(\"x32\"))\n+ nine_file_path = os.path.join(lib, \"wine/fakedlls\", nine_file)\n+\n+ if (os.path.exists(nine_file_path) and\n+ CabInstaller.get_arch_from_dll(nine_file_path) == \"win32\"):\n+ shutil.copy(nine_file_path, self.get_system_path(\"x32\"))\n \n if self.wine_arch == \"win64\":\n- nine_file_64 = os.path.join(lib[1], \"wine/fakedlls\", nine_file)\n- if os.path.exists(nine_file_64):\n+ if (os.path.exists(nine_file_path) and\n+ CabInstaller.get_arch_from_dll(nine_file_path) == \"win64\"):\n shutil.copy(nine_file_64, self.get_system_path(\"x64\"))\n \n+ if not os.path.exists(os.path.join(self.get_system_path(\"x32\"), nine_file)):\n+ raise NineUnavailable(\"could not install \" + nine_file + \" (x32)\")\n+\n+ if self.wine_arch == \"win64\":\n+ if not os.path.exists(os.path.join(self.get_system_path(\"x64\"), nine_file)):\n+ raise NineUnavailable(\"could not install \" + nine_file + \" (x64)\")\n+\n def enable(self):\n if not self.nine_is_supported():\n raise NineUnavailable(\"Nine is not supported on this system\")\n", "issue": "Nine: Lutris can't find Nine libraries\nA lot of changed before creating PR #2092 and merging it - biggest issue is commit 792c22176eff9e063b22d7b9700e2e9b79a11fae, which changes return val of iter_lib_folders() from lists to strings. I believe I used iter_lib_folders() in #2092 because I needed distinguish between lib32 and lib64 paths. 
I will take a look at this and try to fix it ASAP (this week).\r\n\r\nOriginal report:\r\nhttps://github.com/lutris/lutris/pull/2092#issuecomment-529362315\n", "before_files": [{"content": "\"\"\"Gallium Nine helper module\"\"\"\nimport os\nimport shutil\n\nfrom lutris.util import system\nfrom lutris.runners.commands.wine import wineexec\n\nclass NineUnavailable(RuntimeError):\n \"\"\"Exception raised when Gallium Nine is not available\"\"\"\n\nclass NineManager:\n \"\"\"Utility class to install and manage Gallium Nine to a Wine prefix\"\"\"\n\n nine_files = (\"d3d9-nine.dll\", \"ninewinecfg.exe\")\n mesa_files = (\"d3dadapter9.so.1\",)\n\n def __init__(self, prefix, arch):\n self.prefix = prefix\n self.wine_arch = arch\n\n @staticmethod\n def nine_is_supported():\n \"\"\"Check if MESA is built with Gallium Nine state tracker support\n\n basic check for presence of d3dadapter9 library in 'd3d' subdirectory\n of system library directory\n \"\"\"\n for mesa_file in NineManager.mesa_files:\n if not any([os.path.exists(os.path.join(lib[0], \"d3d\", mesa_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n\n if system.LINUX_SYSTEM.is_64_bit:\n if not any([os.path.exists(os.path.join(lib[1], \"d3d\", mesa_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n\n return True\n\n @staticmethod\n def nine_is_installed():\n \"\"\"Check if Gallium Nine standalone is installed on this system\n\n check 'wine/fakedlls' subdirectory of system library directory for Nine binaries\n \"\"\"\n for nine_file in NineManager.nine_files:\n if not any([os.path.exists(os.path.join(lib[0], \"wine/fakedlls\", nine_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n\n if system.LINUX_SYSTEM.is_64_bit:\n if not any([os.path.exists(os.path.join(lib[1], \"wine/fakedlls\", nine_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n\n return True\n\n @staticmethod\n def is_available():\n \"\"\"Check if Gallium Nine can be enabled on this system\"\"\"\n return NineManager.nine_is_supported() and NineManager.nine_is_installed()\n\n def get_system_path(self, arch):\n \"\"\"Return path of Windows system directory with binaries of chosen architecture\"\"\"\n windows_path = os.path.join(self.prefix, \"drive_c/windows\")\n\n if self.wine_arch == \"win32\" and arch == \"x32\":\n return os.path.join(windows_path, \"system32\")\n if self.wine_arch == \"win64\" and arch == \"x32\":\n return os.path.join(windows_path, \"syswow64\")\n if self.wine_arch == \"win64\" and arch == \"x64\":\n return os.path.join(windows_path, \"system32\")\n\n return None\n\n def is_prefix_prepared(self):\n if not all(system.path_exists(os.path.join(self.get_system_path(\"x32\"), nine_file))\n for nine_file in self.nine_files):\n return False\n\n if self.wine_arch == \"win64\":\n if not all(system.path_exists(os.path.join(self.get_system_path(\"x64\"), nine_file))\n for nine_file in self.nine_files):\n return False\n\n return True\n\n def prepare_prefix(self):\n for nine_file in NineManager.nine_files:\n for lib in system.LINUX_SYSTEM.iter_lib_folders():\n nine_file_32 = os.path.join(lib[0], \"wine/fakedlls\", nine_file)\n if os.path.exists(nine_file_32):\n shutil.copy(nine_file_32, self.get_system_path(\"x32\"))\n\n if self.wine_arch == \"win64\":\n nine_file_64 = os.path.join(lib[1], \"wine/fakedlls\", nine_file)\n if os.path.exists(nine_file_64):\n shutil.copy(nine_file_64, self.get_system_path(\"x64\"))\n\n def enable(self):\n if not 
self.nine_is_supported():\n raise NineUnavailable(\"Nine is not supported on this system\")\n if not self.nine_is_installed():\n raise NineUnavailable(\"Nine Standalone is not installed\")\n if not self.is_prefix_prepared():\n self.prepare_prefix()\n\n wineexec(\n \"ninewinecfg\",\n args=\"-e\",\n prefix=self.prefix,\n blocking=True,\n )\n\n def disable(self):\n if self.is_prefix_prepared():\n wineexec(\n \"ninewinecfg\",\n args=\"-d\",\n prefix=self.prefix,\n blocking=True,\n )\n", "path": "lutris/util/wine/nine.py"}]}
2,000
855
gh_patches_debug_61134
rasdani/github-patches
git_diff
e2nIEE__pandapower-1293
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> networkx compatibility issue with python3.9 There's a networkx>=2.5 dependency to run pandapower under python3.9 as gcd has to be imported from math instead of fractions. </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics 4 # and Energy System Technology (IEE), Kassel. All rights reserved. 5 6 from setuptools import setup, find_packages 7 import re 8 9 with open('README.rst', 'rb') as f: 10 install = f.read().decode('utf-8') 11 12 with open('CHANGELOG.rst', 'rb') as f: 13 changelog = f.read().decode('utf-8') 14 15 classifiers = [ 16 'Development Status :: 5 - Production/Stable', 17 'Environment :: Console', 18 'Intended Audience :: Developers', 19 'Intended Audience :: Education', 20 'Intended Audience :: Science/Research', 21 'License :: OSI Approved :: BSD License', 22 'Natural Language :: English', 23 'Operating System :: OS Independent', 24 'Programming Language :: Python', 25 'Programming Language :: Python :: 3'] 26 27 with open('.github/workflows/github_test_action.yml', 'rb') as f: 28 lines = f.read().decode('utf-8') 29 versions = set(re.findall('3.[0-9]', lines)) 30 for version in versions: 31 classifiers.append('Programming Language :: Python :: 3.%s' % version[-1]) 32 33 long_description = '\n\n'.join((install, changelog)) 34 35 setup( 36 name='pandapower', 37 version='2.6.0', 38 author='Leon Thurner, Alexander Scheidler', 39 author_email='[email protected], [email protected]', 40 description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.', 41 long_description=long_description, 42 long_description_content_type='text/x-rst', 43 url='http://www.pandapower.org', 44 license='BSD', 45 install_requires=["pandas>=0.17", 46 "networkx", 47 "scipy<=1.6.0", 48 "numpy>=0.11", 49 "packaging", 50 "xlsxwriter", 51 "xlrd", 52 "openpyxl", 53 "cryptography"], 54 extras_require={ 55 "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"], 56 "plotting": ["plotly", "matplotlib", "python-igraph"], 57 "test": ["pytest", "pytest-xdist"], 58 "performance": ["ortools"]}, 59 packages=find_packages(), 60 include_package_data=True, 61 classifiers=classifiers 62 ) 63 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ url='http://www.pandapower.org', license='BSD', install_requires=["pandas>=0.17", - "networkx", + "networkx>=2.5", "scipy<=1.6.0", "numpy>=0.11", "packaging",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,7 +43,7 @@\n url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pandas>=0.17\",\n- \"networkx\",\n+ \"networkx>=2.5\",\n \"scipy<=1.6.0\",\n \"numpy>=0.11\",\n \"packaging\",\n", "issue": "networkx compatibility issue with python3.9\nThere's a networkx>=2.5 dependency to run pandapower under python3.9 as gcd has to be imported from math instead of fractions.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nfrom setuptools import setup, find_packages\nimport re\n\nwith open('README.rst', 'rb') as f:\n install = f.read().decode('utf-8')\n\nwith open('CHANGELOG.rst', 'rb') as f:\n changelog = f.read().decode('utf-8')\n\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3']\n\nwith open('.github/workflows/github_test_action.yml', 'rb') as f:\n lines = f.read().decode('utf-8')\n versions = set(re.findall('3.[0-9]', lines))\n for version in versions:\n classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])\n\nlong_description = '\\n\\n'.join((install, changelog))\n\nsetup(\n name='pandapower',\n version='2.6.0',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',\n long_description=long_description,\n\tlong_description_content_type='text/x-rst',\n url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pandas>=0.17\",\n \"networkx\",\n \"scipy<=1.6.0\",\n \"numpy>=0.11\",\n \"packaging\",\n \"xlsxwriter\",\n \"xlrd\",\n \"openpyxl\",\n \"cryptography\"],\n extras_require={\n \"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\"],\n \"test\": [\"pytest\", \"pytest-xdist\"],\n \"performance\": [\"ortools\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=classifiers\n)\n", "path": "setup.py"}]}
1,253
103
gh_patches_debug_719
rasdani/github-patches
git_diff
mne-tools__mne-bids-pipeline-680
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Doc deployment step failing The latest CI run failed to execute documentation deployment: https://app.circleci.com/pipelines/github/mne-tools/mne-bids-pipeline/3557/workflows/3458e5cc-c471-4664-8d0a-b0cc4961f9eb/jobs/41986/parallel-runs/0/steps/0-107 ```shell #!/bin/bash -eo pipefail ./.circleci/setup_bash.sh CIRCLE_JOB=deploy_docs COMMIT_MESSAGE=68c63d6878992fb7c298f24420f1d349c6811079 MAINT: Use mike for doc deployment (#676) COMMIT_MESSAGE_ESCAPED=68c63d6878992fb7c298f24420f1d349c6811079 MAINT: Use mike for doc deployment (#676) CIRCLE_REQUESTED_JOB= Running job deploy_docs for main branch ./.circleci/setup_bash.sh: line 35: sudo: command not found Exited with code exit status 127 CircleCI received exit code 127 ``` </issue> <code> [start of docs/source/features/gen_steps.py] 1 #!/bin/env python 2 """Generate steps.md.""" 3 4 import importlib 5 from pathlib import Path 6 from mne_bids_pipeline._config_utils import _get_step_modules 7 8 pre = """\ 9 # Processing steps 10 11 The following table provides a concise summary of each step in the Study 12 Template. All steps exist in the `steps`/ directory. 13 """ 14 15 step_modules = _get_step_modules() 16 17 # Construct the lines of steps.md 18 lines = [pre] 19 for di, (dir_, modules) in enumerate(step_modules.items(), 1): 20 if dir_ == 'all': 21 continue # this is an alias 22 dir_module = importlib.import_module(f'mne_bids_pipeline.steps.{dir_}') 23 dir_header = dir_module.__doc__.split('\n')[0].rstrip('.') 24 dir_body = dir_module.__doc__.split('\n', maxsplit=1) 25 if len(dir_body) > 1: 26 dir_body = dir_body[1].strip() 27 else: 28 dir_body = '' 29 lines.append(f'## {di}. {dir_header}\n') 30 if dir_body: 31 lines.append(f'{dir_body}\n') 32 lines.append('| Processing step | Description |') 33 lines.append('|:----------------|:------------|') 34 # the "all" option 35 dir_name, step_title = dir_, f'Run all {dir_header.lower()} steps.' 36 lines.append(f'`{dir_name}` | {step_title} |') 37 for module in modules: 38 step_name = f'{dir_name}/{Path(module.__file__).name}'[:-3] 39 step_title = module.__doc__.split('\n')[0] 40 lines.append(f'`{step_name}` | {step_title} |') 41 lines.append('') 42 with open(Path(__file__).parent / 'steps.md', 'w') as fid: 43 fid.write('\n'.join(lines)) 44 [end of docs/source/features/gen_steps.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/source/features/gen_steps.py b/docs/source/features/gen_steps.py --- a/docs/source/features/gen_steps.py +++ b/docs/source/features/gen_steps.py @@ -12,6 +12,7 @@ Template. All steps exist in the `steps`/ directory. """ +print('Generating steps …') step_modules = _get_step_modules() # Construct the lines of steps.md
{"golden_diff": "diff --git a/docs/source/features/gen_steps.py b/docs/source/features/gen_steps.py\n--- a/docs/source/features/gen_steps.py\n+++ b/docs/source/features/gen_steps.py\n@@ -12,6 +12,7 @@\n Template. All steps exist in the `steps`/ directory.\n \"\"\"\n \n+print('Generating steps \u2026')\n step_modules = _get_step_modules()\n \n # Construct the lines of steps.md\n", "issue": "Doc deployment step failing\nThe latest CI run failed to execute documentation deployment:\r\nhttps://app.circleci.com/pipelines/github/mne-tools/mne-bids-pipeline/3557/workflows/3458e5cc-c471-4664-8d0a-b0cc4961f9eb/jobs/41986/parallel-runs/0/steps/0-107\r\n\r\n```shell\r\n#!/bin/bash -eo pipefail\r\n./.circleci/setup_bash.sh\r\nCIRCLE_JOB=deploy_docs\r\nCOMMIT_MESSAGE=68c63d6878992fb7c298f24420f1d349c6811079 MAINT: Use mike for doc deployment (#676)\r\nCOMMIT_MESSAGE_ESCAPED=68c63d6878992fb7c298f24420f1d349c6811079 MAINT: Use mike for doc deployment (#676)\r\nCIRCLE_REQUESTED_JOB=\r\nRunning job deploy_docs for main branch\r\n./.circleci/setup_bash.sh: line 35: sudo: command not found\r\n\r\nExited with code exit status 127\r\nCircleCI received exit code 127\r\n```\n", "before_files": [{"content": "#!/bin/env python\n\"\"\"Generate steps.md.\"\"\"\n\nimport importlib\nfrom pathlib import Path\nfrom mne_bids_pipeline._config_utils import _get_step_modules\n\npre = \"\"\"\\\n# Processing steps\n\nThe following table provides a concise summary of each step in the Study\nTemplate. All steps exist in the `steps`/ directory.\n\"\"\"\n\nstep_modules = _get_step_modules()\n\n# Construct the lines of steps.md\nlines = [pre]\nfor di, (dir_, modules) in enumerate(step_modules.items(), 1):\n if dir_ == 'all':\n continue # this is an alias\n dir_module = importlib.import_module(f'mne_bids_pipeline.steps.{dir_}')\n dir_header = dir_module.__doc__.split('\\n')[0].rstrip('.')\n dir_body = dir_module.__doc__.split('\\n', maxsplit=1)\n if len(dir_body) > 1:\n dir_body = dir_body[1].strip()\n else:\n dir_body = ''\n lines.append(f'## {di}. {dir_header}\\n')\n if dir_body:\n lines.append(f'{dir_body}\\n')\n lines.append('| Processing step | Description |')\n lines.append('|:----------------|:------------|')\n # the \"all\" option\n dir_name, step_title = dir_, f'Run all {dir_header.lower()} steps.'\n lines.append(f'`{dir_name}` | {step_title} |')\n for module in modules:\n step_name = f'{dir_name}/{Path(module.__file__).name}'[:-3]\n step_title = module.__doc__.split('\\n')[0]\n lines.append(f'`{step_name}` | {step_title} |')\n lines.append('')\nwith open(Path(__file__).parent / 'steps.md', 'w') as fid:\n fid.write('\\n'.join(lines))\n", "path": "docs/source/features/gen_steps.py"}]}
1,308
88
gh_patches_debug_2713
rasdani/github-patches
git_diff
wagtail__wagtail-9923
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Search on listing views doesn't work unless the `?q=` param exists in the URL <!-- Found a bug? Please fill out the sections below. 👍 --> ### Issue Summary Possible regression in https://github.com/wagtail/wagtail/pull/9768 The `URLSearchParams.get()` returns `null` if the param doesn't exist, so the following code: https://github.com/wagtail/wagtail/blob/a3f10acae17c892d843c419495e4204adb3ed991/client/src/entrypoints/admin/core.js#L270-L276 will crash during `currentQuery.trim()` when searching on the listing views (snippets, images, etc.) if the `?q=` param doesn't exist in the URL. Might be a good time to add `required=False` in here as well: https://github.com/wagtail/wagtail/blob/a3f10acae17c892d843c419495e4204adb3ed991/wagtail/admin/forms/search.py#L12 to remove this silly error when `q` is an empty string: <img width="473" alt="image" src="https://user-images.githubusercontent.com/6379424/213499685-ce37c064-2635-434f-952f-e85fae4ab9af.png"> <!-- A summary of the issue. --> ### Steps to Reproduce 1. Spin up bakerydemo 2. Open the images listing 3. Try to search </issue> <code> [start of wagtail/admin/forms/search.py] 1 from django import forms 2 from django.utils.translation import gettext as _ 3 from django.utils.translation import gettext_lazy 4 5 6 class SearchForm(forms.Form): 7 def __init__(self, *args, **kwargs): 8 placeholder = kwargs.pop("placeholder", _("Search")) 9 super().__init__(*args, **kwargs) 10 self.fields["q"].widget.attrs = {"placeholder": placeholder} 11 12 q = forms.CharField(label=gettext_lazy("Search term"), widget=forms.TextInput()) 13 [end of wagtail/admin/forms/search.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/admin/forms/search.py b/wagtail/admin/forms/search.py --- a/wagtail/admin/forms/search.py +++ b/wagtail/admin/forms/search.py @@ -9,4 +9,8 @@ super().__init__(*args, **kwargs) self.fields["q"].widget.attrs = {"placeholder": placeholder} - q = forms.CharField(label=gettext_lazy("Search term"), widget=forms.TextInput()) + q = forms.CharField( + label=gettext_lazy("Search term"), + widget=forms.TextInput(), + required=False, + )
{"golden_diff": "diff --git a/wagtail/admin/forms/search.py b/wagtail/admin/forms/search.py\n--- a/wagtail/admin/forms/search.py\n+++ b/wagtail/admin/forms/search.py\n@@ -9,4 +9,8 @@\n super().__init__(*args, **kwargs)\n self.fields[\"q\"].widget.attrs = {\"placeholder\": placeholder}\n \n- q = forms.CharField(label=gettext_lazy(\"Search term\"), widget=forms.TextInput())\n+ q = forms.CharField(\n+ label=gettext_lazy(\"Search term\"),\n+ widget=forms.TextInput(),\n+ required=False,\n+ )\n", "issue": "Search on listing views doesn't work unless the `?q=` param exists in the URL\n<!--\r\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n-->\r\n\r\n### Issue Summary\r\n\r\nPossible regression in https://github.com/wagtail/wagtail/pull/9768\r\n\r\nThe `URLSearchParams.get()` returns `null` if the param doesn't exist, so the following code:\r\n\r\nhttps://github.com/wagtail/wagtail/blob/a3f10acae17c892d843c419495e4204adb3ed991/client/src/entrypoints/admin/core.js#L270-L276\r\n\r\nwill crash during `currentQuery.trim()` when searching on the listing views (snippets, images, etc.) if the `?q=` param doesn't exist in the URL.\r\n\r\nMight be a good time to add `required=False` in here as well:\r\n\r\nhttps://github.com/wagtail/wagtail/blob/a3f10acae17c892d843c419495e4204adb3ed991/wagtail/admin/forms/search.py#L12\r\n\r\nto remove this silly error when `q` is an empty string:\r\n\r\n<img width=\"473\" alt=\"image\" src=\"https://user-images.githubusercontent.com/6379424/213499685-ce37c064-2635-434f-952f-e85fae4ab9af.png\">\r\n\r\n<!--\r\nA summary of the issue.\r\n-->\r\n\r\n### Steps to Reproduce\r\n\r\n1. Spin up bakerydemo\r\n2. Open the images listing\r\n3. Try to search\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\nclass SearchForm(forms.Form):\n def __init__(self, *args, **kwargs):\n placeholder = kwargs.pop(\"placeholder\", _(\"Search\"))\n super().__init__(*args, **kwargs)\n self.fields[\"q\"].widget.attrs = {\"placeholder\": placeholder}\n\n q = forms.CharField(label=gettext_lazy(\"Search term\"), widget=forms.TextInput())\n", "path": "wagtail/admin/forms/search.py"}]}
1,030
125
gh_patches_debug_30559
rasdani/github-patches
git_diff
Gallopsled__pwntools-343
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pwnlib.term.text throws exceptions on attribute accesses ``` >>> import pwnlib >>> pwnlib.term.text.__lol__ Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pwnlib/term/text.py", line 99, in __getattr__ init += self._fg_color(c()) File "pwnlib/term/text.py", line 93, in c return self._colors[c] + bright KeyError: '' ``` </issue> <code> [start of pwnlib/term/text.py] 1 import types, sys, functools 2 from . import termcap 3 4 def eval_when(when): 5 if isinstance(when, file) or \ 6 when in ('always', 'never', 'auto', sys.stderr, sys.stdout): 7 if when == 'always': 8 return True 9 elif when == 'never': 10 return False 11 elif when == 'auto': 12 return sys.stdout.isatty() 13 else: 14 return when.isatty() 15 else: 16 raise ValueError('text.when: must be a file-object or "always", "never" or "auto"') 17 18 class Module(types.ModuleType): 19 def __init__(self): 20 self.__file__ = __file__ 21 self.__name__ = __name__ 22 self.num_colors = termcap.get('colors', default = 8) 23 self.has_bright = self.num_colors >= 16 24 self.has_gray = self.has_bright 25 self.when = 'auto' 26 self._colors = { 27 'black': 0, 28 'red': 1, 29 'green': 2, 30 'yellow': 3, 31 'blue': 4, 32 'magenta': 5, 33 'cyan': 6, 34 'white': 7, 35 } 36 self._reset = '\x1b[m' 37 self._attributes = {} 38 for x, y in [('italic' , 'sitm'), 39 ('bold' , 'bold'), 40 ('underline', 'smul'), 41 ('reverse' , 'rev')]: 42 s = termcap.get(y) 43 self._attributes[x] = s 44 self._cache = {} 45 46 @property 47 def when(self): 48 return self._when 49 50 @when.setter 51 def when(self, val): 52 self._when = eval_when(val) 53 54 def _fg_color(self, c): 55 return termcap.get('setaf', c) or termcap.get('setf', c) 56 57 def _bg_color(self, c): 58 return termcap.get('setab', c) or termcap.get('setb', c) 59 60 def _decorator(self, desc, init): 61 def f(self, s, when = None): 62 if when: 63 if eval_when(when): 64 return init + s + self._reset 65 else: 66 return s 67 else: 68 if self.when: 69 return init + s + self._reset 70 else: 71 return s 72 setattr(Module, desc, f) 73 return functools.partial(f, self) 74 75 def __getattr__(self, desc): 76 ds = desc.replace('gray', 'bright_black').split('_') 77 init = '' 78 while ds: 79 d = ds[0] 80 try: 81 init += self._attributes[d] 82 ds.pop(0) 83 except KeyError: 84 break 85 def c(): 86 bright = 0 87 c = ds.pop(0) 88 if c == 'bright': 89 c = ds.pop(0) 90 if self.has_bright: 91 bright = 8 92 return self._colors[c] + bright 93 if ds: 94 if ds[0] == 'on': 95 ds.pop(0) 96 init += self._bg_color(c()) 97 else: 98 init += self._fg_color(c()) 99 if len(ds): 100 assert ds.pop(0) == 'on' 101 init += self._bg_color(c()) 102 return self._decorator(desc, init) 103 104 def get(self, desc): 105 return self.__getattr__(desc) 106 107 tether = sys.modules[__name__] 108 sys.modules[__name__] = Module() 109 [end of pwnlib/term/text.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwnlib/term/text.py b/pwnlib/term/text.py --- a/pwnlib/term/text.py +++ b/pwnlib/term/text.py @@ -73,33 +73,36 @@ return functools.partial(f, self) def __getattr__(self, desc): - ds = desc.replace('gray', 'bright_black').split('_') - init = '' - while ds: - d = ds[0] - try: - init += self._attributes[d] - ds.pop(0) - except KeyError: - break - def c(): - bright = 0 - c = ds.pop(0) - if c == 'bright': + try: + ds = desc.replace('gray', 'bright_black').split('_') + init = '' + while ds: + d = ds[0] + try: + init += self._attributes[d] + ds.pop(0) + except KeyError: + break + def c(): + bright = 0 c = ds.pop(0) - if self.has_bright: - bright = 8 - return self._colors[c] + bright - if ds: - if ds[0] == 'on': - ds.pop(0) - init += self._bg_color(c()) - else: - init += self._fg_color(c()) - if len(ds): - assert ds.pop(0) == 'on' + if c == 'bright': + c = ds.pop(0) + if self.has_bright: + bright = 8 + return self._colors[c] + bright + if ds: + if ds[0] == 'on': + ds.pop(0) init += self._bg_color(c()) - return self._decorator(desc, init) + else: + init += self._fg_color(c()) + if len(ds): + assert ds.pop(0) == 'on' + init += self._bg_color(c()) + return self._decorator(desc, init) + except (IndexError, KeyError): + raise AttributeError("'module' object has no attribute %r" % desc) def get(self, desc): return self.__getattr__(desc)
{"golden_diff": "diff --git a/pwnlib/term/text.py b/pwnlib/term/text.py\n--- a/pwnlib/term/text.py\n+++ b/pwnlib/term/text.py\n@@ -73,33 +73,36 @@\n return functools.partial(f, self)\n \n def __getattr__(self, desc):\n- ds = desc.replace('gray', 'bright_black').split('_')\n- init = ''\n- while ds:\n- d = ds[0]\n- try:\n- init += self._attributes[d]\n- ds.pop(0)\n- except KeyError:\n- break\n- def c():\n- bright = 0\n- c = ds.pop(0)\n- if c == 'bright':\n+ try:\n+ ds = desc.replace('gray', 'bright_black').split('_')\n+ init = ''\n+ while ds:\n+ d = ds[0]\n+ try:\n+ init += self._attributes[d]\n+ ds.pop(0)\n+ except KeyError:\n+ break\n+ def c():\n+ bright = 0\n c = ds.pop(0)\n- if self.has_bright:\n- bright = 8\n- return self._colors[c] + bright\n- if ds:\n- if ds[0] == 'on':\n- ds.pop(0)\n- init += self._bg_color(c())\n- else:\n- init += self._fg_color(c())\n- if len(ds):\n- assert ds.pop(0) == 'on'\n+ if c == 'bright':\n+ c = ds.pop(0)\n+ if self.has_bright:\n+ bright = 8\n+ return self._colors[c] + bright\n+ if ds:\n+ if ds[0] == 'on':\n+ ds.pop(0)\n init += self._bg_color(c())\n- return self._decorator(desc, init)\n+ else:\n+ init += self._fg_color(c())\n+ if len(ds):\n+ assert ds.pop(0) == 'on'\n+ init += self._bg_color(c())\n+ return self._decorator(desc, init)\n+ except (IndexError, KeyError):\n+ raise AttributeError(\"'module' object has no attribute %r\" % desc)\n \n def get(self, desc):\n return self.__getattr__(desc)\n", "issue": "pwnlib.term.text throws exceptions on attribute accesses\n```\n>>> import pwnlib\n>>> pwnlib.term.text.__lol__\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"pwnlib/term/text.py\", line 99, in __getattr__\n init += self._fg_color(c())\n File \"pwnlib/term/text.py\", line 93, in c\n return self._colors[c] + bright\nKeyError: ''\n```\n\n", "before_files": [{"content": "import types, sys, functools\nfrom . 
import termcap\n\ndef eval_when(when):\n if isinstance(when, file) or \\\n when in ('always', 'never', 'auto', sys.stderr, sys.stdout):\n if when == 'always':\n return True\n elif when == 'never':\n return False\n elif when == 'auto':\n return sys.stdout.isatty()\n else:\n return when.isatty()\n else:\n raise ValueError('text.when: must be a file-object or \"always\", \"never\" or \"auto\"')\n\nclass Module(types.ModuleType):\n def __init__(self):\n self.__file__ = __file__\n self.__name__ = __name__\n self.num_colors = termcap.get('colors', default = 8)\n self.has_bright = self.num_colors >= 16\n self.has_gray = self.has_bright\n self.when = 'auto'\n self._colors = {\n 'black': 0,\n 'red': 1,\n 'green': 2,\n 'yellow': 3,\n 'blue': 4,\n 'magenta': 5,\n 'cyan': 6,\n 'white': 7,\n }\n self._reset = '\\x1b[m'\n self._attributes = {}\n for x, y in [('italic' , 'sitm'),\n ('bold' , 'bold'),\n ('underline', 'smul'),\n ('reverse' , 'rev')]:\n s = termcap.get(y)\n self._attributes[x] = s\n self._cache = {}\n\n @property\n def when(self):\n return self._when\n\n @when.setter\n def when(self, val):\n self._when = eval_when(val)\n\n def _fg_color(self, c):\n return termcap.get('setaf', c) or termcap.get('setf', c)\n\n def _bg_color(self, c):\n return termcap.get('setab', c) or termcap.get('setb', c)\n\n def _decorator(self, desc, init):\n def f(self, s, when = None):\n if when:\n if eval_when(when):\n return init + s + self._reset\n else:\n return s\n else:\n if self.when:\n return init + s + self._reset\n else:\n return s\n setattr(Module, desc, f)\n return functools.partial(f, self)\n\n def __getattr__(self, desc):\n ds = desc.replace('gray', 'bright_black').split('_')\n init = ''\n while ds:\n d = ds[0]\n try:\n init += self._attributes[d]\n ds.pop(0)\n except KeyError:\n break\n def c():\n bright = 0\n c = ds.pop(0)\n if c == 'bright':\n c = ds.pop(0)\n if self.has_bright:\n bright = 8\n return self._colors[c] + bright\n if ds:\n if ds[0] == 'on':\n ds.pop(0)\n init += self._bg_color(c())\n else:\n init += self._fg_color(c())\n if len(ds):\n assert ds.pop(0) == 'on'\n init += self._bg_color(c())\n return self._decorator(desc, init)\n\n def get(self, desc):\n return self.__getattr__(desc)\n\ntether = sys.modules[__name__]\nsys.modules[__name__] = Module()\n", "path": "pwnlib/term/text.py"}]}
1,641
524
gh_patches_debug_35673
rasdani/github-patches
git_diff
amundsen-io__amundsen-1573
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug Report - Postegres / Redshift Extractors have inconsistent behavior <!--- Provide a general summary of the issue in the Title above --> <!--- Look through existing open and closed issues to see if someone has reported the issue before --> ## Expected Behavior ```python # For RedshiftMetadataExtractor redshift_metadata_extractor_config = ConfigFactory.from_dict({ 'extractor.redshift_metadata.where_clause_suffix': 'schema <> "foo"' }) ``` ## Current Behavior ```python # For RedshiftMetadataExtractor redshift_metadata_extractor_config = ConfigFactory.from_dict({ 'extractor.redshift_metadata.where_clause_suffix': 'where schema <> "foo"' }) ``` ## Possible Solution Update redshift extractor to properly build suffix ## Screenshots (if appropriate) ## Context Both the `PostgresMetadataExtractor` and the `RedshiftMetadataExtractor` take the config `extractor.redshift_metadata.where_clause_suffix` as they are both based off of `BasePostgresMetadataExtractor`. However they require slightly different different sql partials, which is unexpected: The `where_clause_suffix` for `RedshiftMetadataExtractor` takes a partial sql statement **without** a `WHERE`. The `where_clause_suffix` for `PostgresMetadataExtractor` take a partial sql statement **with** a `WHERE`. The name `where_clause_suffix` implies that its a statement appended to a where clause and should _not_ (IMHO) require a `WHERE` statement. <!--- How has this issue affected you? --> <!--- Providing context helps us come up with a solution that is most useful in the real world --> ## Your Environment <!--- Include as many relevant details about the environment you experienced the bug in --> * Amunsen version used: amundsen-databuilder 6.3.1 * Data warehouse stores: Neo4j * Deployment (k8s or native): terraform * Link to your fork or repository: </issue> <code> [start of databuilder/databuilder/extractor/redshift_metadata_extractor.py] 1 # Copyright Contributors to the Amundsen project. 2 # SPDX-License-Identifier: Apache-2.0 3 4 from typing import ( # noqa: F401 5 Any, Dict, Iterator, Union, 6 ) 7 8 from pyhocon import ConfigFactory, ConfigTree # noqa: F401 9 10 from databuilder.extractor.base_postgres_metadata_extractor import BasePostgresMetadataExtractor 11 12 13 class RedshiftMetadataExtractor(BasePostgresMetadataExtractor): 14 """ 15 Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor 16 17 This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views, 18 we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS(). 
19 """ 20 21 def get_sql_statement(self, use_catalog_as_cluster_name: bool, where_clause_suffix: str) -> str: 22 if use_catalog_as_cluster_name: 23 cluster_source = "CURRENT_DATABASE()" 24 else: 25 cluster_source = f"'{self._cluster}'" 26 27 return """ 28 SELECT 29 * 30 FROM ( 31 SELECT 32 {cluster_source} as cluster, 33 c.table_schema as schema, 34 c.table_name as name, 35 pgtd.description as description, 36 c.column_name as col_name, 37 c.data_type as col_type, 38 pgcd.description as col_description, 39 ordinal_position as col_sort_order 40 FROM INFORMATION_SCHEMA.COLUMNS c 41 INNER JOIN 42 pg_catalog.pg_statio_all_tables as st on c.table_schema=st.schemaname and c.table_name=st.relname 43 LEFT JOIN 44 pg_catalog.pg_description pgcd on pgcd.objoid=st.relid and pgcd.objsubid=c.ordinal_position 45 LEFT JOIN 46 pg_catalog.pg_description pgtd on pgtd.objoid=st.relid and pgtd.objsubid=0 47 48 UNION 49 50 SELECT 51 {cluster_source} as cluster, 52 view_schema as schema, 53 view_name as name, 54 NULL as description, 55 column_name as col_name, 56 data_type as col_type, 57 NULL as col_description, 58 ordinal_position as col_sort_order 59 FROM 60 PG_GET_LATE_BINDING_VIEW_COLS() 61 COLS(view_schema NAME, view_name NAME, column_name NAME, data_type VARCHAR, ordinal_position INT) 62 63 UNION 64 65 SELECT 66 {cluster_source} AS cluster, 67 schemaname AS schema, 68 tablename AS name, 69 NULL AS description, 70 columnname AS col_name, 71 external_type AS col_type, 72 NULL AS col_description, 73 columnnum AS col_sort_order 74 FROM svv_external_columns 75 ) 76 77 {where_clause_suffix} 78 ORDER by cluster, schema, name, col_sort_order ; 79 """.format( 80 cluster_source=cluster_source, 81 where_clause_suffix=where_clause_suffix, 82 ) 83 84 def get_scope(self) -> str: 85 return 'extractor.redshift_metadata' 86 [end of databuilder/databuilder/extractor/redshift_metadata_extractor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/databuilder/databuilder/extractor/redshift_metadata_extractor.py b/databuilder/databuilder/extractor/redshift_metadata_extractor.py --- a/databuilder/databuilder/extractor/redshift_metadata_extractor.py +++ b/databuilder/databuilder/extractor/redshift_metadata_extractor.py @@ -1,6 +1,7 @@ # Copyright Contributors to the Amundsen project. # SPDX-License-Identifier: Apache-2.0 +import logging from typing import ( # noqa: F401 Any, Dict, Iterator, Union, ) @@ -9,11 +10,14 @@ from databuilder.extractor.base_postgres_metadata_extractor import BasePostgresMetadataExtractor +LOGGER = logging.getLogger(__name__) + class RedshiftMetadataExtractor(BasePostgresMetadataExtractor): """ Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor + This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views, we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS(). """ @@ -24,6 +28,15 @@ else: cluster_source = f"'{self._cluster}'" + if where_clause_suffix: + if where_clause_suffix.lower().startswith("where"): + LOGGER.warning("you no longer need to begin with 'where' in your suffix") + where_clause = where_clause_suffix + else: + where_clause = f"where {where_clause_suffix}" + else: + where_clause = "" + return """ SELECT * @@ -74,11 +87,11 @@ FROM svv_external_columns ) - {where_clause_suffix} + {where_clause} ORDER by cluster, schema, name, col_sort_order ; """.format( cluster_source=cluster_source, - where_clause_suffix=where_clause_suffix, + where_clause=where_clause, ) def get_scope(self) -> str:
{"golden_diff": "diff --git a/databuilder/databuilder/extractor/redshift_metadata_extractor.py b/databuilder/databuilder/extractor/redshift_metadata_extractor.py\n--- a/databuilder/databuilder/extractor/redshift_metadata_extractor.py\n+++ b/databuilder/databuilder/extractor/redshift_metadata_extractor.py\n@@ -1,6 +1,7 @@\n # Copyright Contributors to the Amundsen project.\n # SPDX-License-Identifier: Apache-2.0\n \n+import logging\n from typing import ( # noqa: F401\n Any, Dict, Iterator, Union,\n )\n@@ -9,11 +10,14 @@\n \n from databuilder.extractor.base_postgres_metadata_extractor import BasePostgresMetadataExtractor\n \n+LOGGER = logging.getLogger(__name__)\n+\n \n class RedshiftMetadataExtractor(BasePostgresMetadataExtractor):\n \"\"\"\n Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor\n \n+\n This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views,\n we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS().\n \"\"\"\n@@ -24,6 +28,15 @@\n else:\n cluster_source = f\"'{self._cluster}'\"\n \n+ if where_clause_suffix:\n+ if where_clause_suffix.lower().startswith(\"where\"):\n+ LOGGER.warning(\"you no longer need to begin with 'where' in your suffix\")\n+ where_clause = where_clause_suffix\n+ else:\n+ where_clause = f\"where {where_clause_suffix}\"\n+ else:\n+ where_clause = \"\"\n+\n return \"\"\"\n SELECT\n *\n@@ -74,11 +87,11 @@\n FROM svv_external_columns\n )\n \n- {where_clause_suffix}\n+ {where_clause}\n ORDER by cluster, schema, name, col_sort_order ;\n \"\"\".format(\n cluster_source=cluster_source,\n- where_clause_suffix=where_clause_suffix,\n+ where_clause=where_clause,\n )\n \n def get_scope(self) -> str:\n", "issue": "Bug Report - Postegres / Redshift Extractors have inconsistent behavior\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- Look through existing open and closed issues to see if someone has reported the issue before -->\r\n\r\n## Expected Behavior\r\n\r\n```python\r\n# For RedshiftMetadataExtractor\r\nredshift_metadata_extractor_config = ConfigFactory.from_dict({\r\n 'extractor.redshift_metadata.where_clause_suffix': 'schema <> \"foo\"'\r\n})\r\n```\r\n\r\n## Current Behavior\r\n\r\n```python\r\n# For RedshiftMetadataExtractor\r\nredshift_metadata_extractor_config = ConfigFactory.from_dict({\r\n 'extractor.redshift_metadata.where_clause_suffix': 'where schema <> \"foo\"'\r\n})\r\n```\r\n\r\n## Possible Solution\r\n\r\nUpdate redshift extractor to properly build suffix\r\n\r\n## Screenshots (if appropriate)\r\n\r\n## Context\r\n\r\nBoth the `PostgresMetadataExtractor` and the `RedshiftMetadataExtractor` take the config `extractor.redshift_metadata.where_clause_suffix` as they are both based off of `BasePostgresMetadataExtractor`.\r\n\r\nHowever they require slightly different different sql partials, which is unexpected: \r\n\r\nThe `where_clause_suffix` for `RedshiftMetadataExtractor` takes a partial sql statement **without** a `WHERE`.\r\nThe `where_clause_suffix` for `PostgresMetadataExtractor` take a partial sql statement **with** a `WHERE`.\r\n\r\nThe name `where_clause_suffix` implies that its a statement appended to a where clause and should _not_ (IMHO) require a `WHERE` statement.\r\n\r\n\r\n<!--- How has this issue affected you? 
-->\r\n<!--- Providing context helps us come up with a solution that is most useful in the real world -->\r\n\r\n## Your Environment\r\n<!--- Include as many relevant details about the environment you experienced the bug in -->\r\n* Amunsen version used: amundsen-databuilder 6.3.1\r\n* Data warehouse stores: Neo4j\r\n* Deployment (k8s or native): terraform\r\n* Link to your fork or repository:\n", "before_files": [{"content": "# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import ( # noqa: F401\n Any, Dict, Iterator, Union,\n)\n\nfrom pyhocon import ConfigFactory, ConfigTree # noqa: F401\n\nfrom databuilder.extractor.base_postgres_metadata_extractor import BasePostgresMetadataExtractor\n\n\nclass RedshiftMetadataExtractor(BasePostgresMetadataExtractor):\n \"\"\"\n Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor\n\n This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views,\n we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS().\n \"\"\"\n\n def get_sql_statement(self, use_catalog_as_cluster_name: bool, where_clause_suffix: str) -> str:\n if use_catalog_as_cluster_name:\n cluster_source = \"CURRENT_DATABASE()\"\n else:\n cluster_source = f\"'{self._cluster}'\"\n\n return \"\"\"\n SELECT\n *\n FROM (\n SELECT\n {cluster_source} as cluster,\n c.table_schema as schema,\n c.table_name as name,\n pgtd.description as description,\n c.column_name as col_name,\n c.data_type as col_type,\n pgcd.description as col_description,\n ordinal_position as col_sort_order\n FROM INFORMATION_SCHEMA.COLUMNS c\n INNER JOIN\n pg_catalog.pg_statio_all_tables as st on c.table_schema=st.schemaname and c.table_name=st.relname\n LEFT JOIN\n pg_catalog.pg_description pgcd on pgcd.objoid=st.relid and pgcd.objsubid=c.ordinal_position\n LEFT JOIN\n pg_catalog.pg_description pgtd on pgtd.objoid=st.relid and pgtd.objsubid=0\n\n UNION\n\n SELECT\n {cluster_source} as cluster,\n view_schema as schema,\n view_name as name,\n NULL as description,\n column_name as col_name,\n data_type as col_type,\n NULL as col_description,\n ordinal_position as col_sort_order\n FROM\n PG_GET_LATE_BINDING_VIEW_COLS()\n COLS(view_schema NAME, view_name NAME, column_name NAME, data_type VARCHAR, ordinal_position INT)\n\n UNION\n\n SELECT\n {cluster_source} AS cluster,\n schemaname AS schema,\n tablename AS name,\n NULL AS description,\n columnname AS col_name,\n external_type AS col_type,\n NULL AS col_description,\n columnnum AS col_sort_order\n FROM svv_external_columns\n )\n\n {where_clause_suffix}\n ORDER by cluster, schema, name, col_sort_order ;\n \"\"\".format(\n cluster_source=cluster_source,\n where_clause_suffix=where_clause_suffix,\n )\n\n def get_scope(self) -> str:\n return 'extractor.redshift_metadata'\n", "path": "databuilder/databuilder/extractor/redshift_metadata_extractor.py"}]}
1,743
446
gh_patches_debug_2371
rasdani/github-patches
git_diff
Kinto__kinto-1040
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Kinto Admin version not shown in footer ![screenshot from 2017-01-24 17-01-08](https://cloud.githubusercontent.com/assets/546692/22255110/b87c08e2-e256-11e6-9880-b85014bb9f0a.png) Looks like the effects of this are lost when packaged as a kinto plugin: https://github.com/Kinto/kinto-admin/commit/8b184b041121ed4affddfbc2ce98ce658226ee34 </issue> <code> [start of kinto/plugins/admin/__init__.py] 1 from pyramid.static import static_view 2 from pyramid.httpexceptions import HTTPTemporaryRedirect 3 4 5 def includeme(config): 6 # Process settings to remove storage wording. 7 8 # Expose capability. 9 config.add_api_capability( 10 "admin", 11 version="1.8.0", 12 description="Serves the admin console.", 13 url="https://github.com/Kinto/kinto-admin/", 14 ) 15 16 build_dir = static_view('kinto.plugins.admin:build', use_subpath=True) 17 config.add_route('catchall_static', '/admin/*subpath') 18 config.add_view(build_dir, route_name="catchall_static") 19 20 # Setup redirect without trailing slash. 21 def admin_redirect_view(request): 22 raise HTTPTemporaryRedirect(request.path + '/') 23 24 config.add_route('admin_redirect', '/admin') 25 config.add_view(admin_redirect_view, route_name="admin_redirect") 26 [end of kinto/plugins/admin/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/plugins/admin/__init__.py b/kinto/plugins/admin/__init__.py --- a/kinto/plugins/admin/__init__.py +++ b/kinto/plugins/admin/__init__.py @@ -8,7 +8,7 @@ # Expose capability. config.add_api_capability( "admin", - version="1.8.0", + version="1.8.1", description="Serves the admin console.", url="https://github.com/Kinto/kinto-admin/", )
{"golden_diff": "diff --git a/kinto/plugins/admin/__init__.py b/kinto/plugins/admin/__init__.py\n--- a/kinto/plugins/admin/__init__.py\n+++ b/kinto/plugins/admin/__init__.py\n@@ -8,7 +8,7 @@\n # Expose capability.\n config.add_api_capability(\n \"admin\",\n- version=\"1.8.0\",\n+ version=\"1.8.1\",\n description=\"Serves the admin console.\",\n url=\"https://github.com/Kinto/kinto-admin/\",\n )\n", "issue": "Kinto Admin version not shown in footer\n\r\n![screenshot from 2017-01-24 17-01-08](https://cloud.githubusercontent.com/assets/546692/22255110/b87c08e2-e256-11e6-9880-b85014bb9f0a.png)\r\n\r\nLooks like the effects of this are lost when packaged as a kinto plugin:\r\n\r\nhttps://github.com/Kinto/kinto-admin/commit/8b184b041121ed4affddfbc2ce98ce658226ee34\n", "before_files": [{"content": "from pyramid.static import static_view\nfrom pyramid.httpexceptions import HTTPTemporaryRedirect\n\n\ndef includeme(config):\n # Process settings to remove storage wording.\n\n # Expose capability.\n config.add_api_capability(\n \"admin\",\n version=\"1.8.0\",\n description=\"Serves the admin console.\",\n url=\"https://github.com/Kinto/kinto-admin/\",\n )\n\n build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)\n config.add_route('catchall_static', '/admin/*subpath')\n config.add_view(build_dir, route_name=\"catchall_static\")\n\n # Setup redirect without trailing slash.\n def admin_redirect_view(request):\n raise HTTPTemporaryRedirect(request.path + '/')\n\n config.add_route('admin_redirect', '/admin')\n config.add_view(admin_redirect_view, route_name=\"admin_redirect\")\n", "path": "kinto/plugins/admin/__init__.py"}]}
923
115
gh_patches_debug_174
rasdani/github-patches
git_diff
pulp__pulpcore-3469
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Expose "get_url" via the plugin interface </issue> <code> [start of pulpcore/plugin/util.py] 1 from pulpcore.app.role_util import ( # noqa 2 assign_role, 3 get_groups_with_perms, 4 get_groups_with_perms_attached_perms, 5 get_groups_with_perms_attached_roles, 6 get_objects_for_group, 7 get_objects_for_user, 8 get_perms_for_model, 9 get_users_with_perms, 10 get_users_with_perms_attached_perms, 11 get_users_with_perms_attached_roles, 12 remove_role, 13 ) 14 15 from pulpcore.app.util import get_artifact_url, gpg_verify, verify_signature # noqa 16 [end of pulpcore/plugin/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/plugin/util.py b/pulpcore/plugin/util.py --- a/pulpcore/plugin/util.py +++ b/pulpcore/plugin/util.py @@ -12,4 +12,4 @@ remove_role, ) -from pulpcore.app.util import get_artifact_url, gpg_verify, verify_signature # noqa +from pulpcore.app.util import get_artifact_url, get_url, gpg_verify, verify_signature # noqa
{"golden_diff": "diff --git a/pulpcore/plugin/util.py b/pulpcore/plugin/util.py\n--- a/pulpcore/plugin/util.py\n+++ b/pulpcore/plugin/util.py\n@@ -12,4 +12,4 @@\n remove_role,\n )\n \n-from pulpcore.app.util import get_artifact_url, gpg_verify, verify_signature # noqa\n+from pulpcore.app.util import get_artifact_url, get_url, gpg_verify, verify_signature # noqa\n", "issue": "Expose \"get_url\" via the plugin interface\n\n", "before_files": [{"content": "from pulpcore.app.role_util import ( # noqa\n assign_role,\n get_groups_with_perms,\n get_groups_with_perms_attached_perms,\n get_groups_with_perms_attached_roles,\n get_objects_for_group,\n get_objects_for_user,\n get_perms_for_model,\n get_users_with_perms,\n get_users_with_perms_attached_perms,\n get_users_with_perms_attached_roles,\n remove_role,\n)\n\nfrom pulpcore.app.util import get_artifact_url, gpg_verify, verify_signature # noqa\n", "path": "pulpcore/plugin/util.py"}]}
674
101
gh_patches_debug_21011
rasdani/github-patches
git_diff
ivy-llc__ivy-18334
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> compress </issue> <code> [start of ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py] 1 import ivy 2 from ivy.functional.frontends.numpy.func_wrapper import ( 3 to_ivy_arrays_and_back, 4 inputs_to_ivy_arrays, 5 ) 6 7 8 @to_ivy_arrays_and_back 9 def take_along_axis(arr, indices, axis): 10 return ivy.take_along_axis(arr, indices, axis) 11 12 13 @to_ivy_arrays_and_back 14 def tril_indices(n, k=0, m=None): 15 return ivy.tril_indices(n, m, k) 16 17 18 @to_ivy_arrays_and_back 19 def indices(dimensions, dtype=int, sparse=False): 20 dimensions = tuple(dimensions) 21 N = len(dimensions) 22 shape = (1,) * N 23 if sparse: 24 res = tuple() 25 else: 26 res = ivy.empty((N,) + dimensions, dtype=dtype) 27 for i, dim in enumerate(dimensions): 28 idx = ivy.arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1 :]) 29 if sparse: 30 res = res + (idx,) 31 else: 32 res[i] = idx 33 return res 34 35 36 # unravel_index 37 @to_ivy_arrays_and_back 38 def unravel_index(indices, shape, order="C"): 39 ret = [x.astype("int64") for x in ivy.unravel_index(indices, shape)] 40 return tuple(ret) 41 42 43 @to_ivy_arrays_and_back 44 def fill_diagonal(a, val, wrap=False): 45 if a.ndim < 2: 46 raise ValueError("array must be at least 2-d") 47 end = None 48 if a.ndim == 2: 49 # Explicit, fast formula for the common case. For 2-d arrays, we 50 # accept rectangular ones. 51 step = a.shape[1] + 1 52 # This is needed to don't have tall matrix have the diagonal wrap. 53 if not wrap: 54 end = a.shape[1] * a.shape[1] 55 else: 56 # For more than d=2, the strided formula is only valid for arrays with 57 # all dimensions equal, so we check first. 58 if not ivy.all(ivy.diff(a.shape) == 0): 59 raise ValueError("All dimensions of input must be of equal length") 60 step = 1 + ivy.sum(ivy.cumprod(a.shape[:-1])) 61 62 # Write the value out into the diagonal. 63 shape = a.shape 64 a = ivy.reshape(a, a.size) 65 a[:end:step] = val 66 a = ivy.reshape(a, shape) 67 68 69 @inputs_to_ivy_arrays 70 def put_along_axis(arr, indices, values, axis): 71 ivy.put_along_axis(arr, indices, values, axis) 72 73 74 def diag(v, k=0): 75 return ivy.diag(v, k=k) 76 77 78 @to_ivy_arrays_and_back 79 def diagonal(a, offset, axis1, axis2): 80 return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2) 81 [end of ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py --- a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py +++ b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py @@ -2,6 +2,7 @@ from ivy.functional.frontends.numpy.func_wrapper import ( to_ivy_arrays_and_back, inputs_to_ivy_arrays, + handle_numpy_out, ) @@ -78,3 +79,22 @@ @to_ivy_arrays_and_back def diagonal(a, offset, axis1, axis2): return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2) + + +@to_ivy_arrays_and_back +@handle_numpy_out +def compress(condition, a, axis=None, out=None): + condition_arr = ivy.asarray(condition).astype(bool) + if condition_arr.ndim != 1: + raise ivy.utils.exceptions.IvyException("Condition must be a 1D array") + if axis is None: + arr = ivy.asarray(a).flatten() + axis = 0 + else: + arr = ivy.moveaxis(a, axis, 0) + if condition_arr.shape[0] > arr.shape[0]: + raise ivy.utils.exceptions.IvyException( + "Condition contains entries that are out of bounds" + ) + arr = arr[: condition_arr.shape[0]] + return ivy.moveaxis(arr[condition_arr], 0, axis)
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py\n--- a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py\n+++ b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py\n@@ -2,6 +2,7 @@\n from ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n inputs_to_ivy_arrays,\n+ handle_numpy_out,\n )\n \n \n@@ -78,3 +79,22 @@\n @to_ivy_arrays_and_back\n def diagonal(a, offset, axis1, axis2):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n+\n+\n+@to_ivy_arrays_and_back\n+@handle_numpy_out\n+def compress(condition, a, axis=None, out=None):\n+ condition_arr = ivy.asarray(condition).astype(bool)\n+ if condition_arr.ndim != 1:\n+ raise ivy.utils.exceptions.IvyException(\"Condition must be a 1D array\")\n+ if axis is None:\n+ arr = ivy.asarray(a).flatten()\n+ axis = 0\n+ else:\n+ arr = ivy.moveaxis(a, axis, 0)\n+ if condition_arr.shape[0] > arr.shape[0]:\n+ raise ivy.utils.exceptions.IvyException(\n+ \"Condition contains entries that are out of bounds\"\n+ )\n+ arr = arr[: condition_arr.shape[0]]\n+ return ivy.moveaxis(arr[condition_arr], 0, axis)\n", "issue": "compress\n\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n inputs_to_ivy_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n, k=0, m=None):\n return ivy.tril_indices(n, m, k)\n\n\n@to_ivy_arrays_and_back\ndef indices(dimensions, dtype=int, sparse=False):\n dimensions = tuple(dimensions)\n N = len(dimensions)\n shape = (1,) * N\n if sparse:\n res = tuple()\n else:\n res = ivy.empty((N,) + dimensions, dtype=dtype)\n for i, dim in enumerate(dimensions):\n idx = ivy.arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1 :])\n if sparse:\n res = res + (idx,)\n else:\n res[i] = idx\n return res\n\n\n# unravel_index\n@to_ivy_arrays_and_back\ndef unravel_index(indices, shape, order=\"C\"):\n ret = [x.astype(\"int64\") for x in ivy.unravel_index(indices, shape)]\n return tuple(ret)\n\n\n@to_ivy_arrays_and_back\ndef fill_diagonal(a, val, wrap=False):\n if a.ndim < 2:\n raise ValueError(\"array must be at least 2-d\")\n end = None\n if a.ndim == 2:\n # Explicit, fast formula for the common case. For 2-d arrays, we\n # accept rectangular ones.\n step = a.shape[1] + 1\n # This is needed to don't have tall matrix have the diagonal wrap.\n if not wrap:\n end = a.shape[1] * a.shape[1]\n else:\n # For more than d=2, the strided formula is only valid for arrays with\n # all dimensions equal, so we check first.\n if not ivy.all(ivy.diff(a.shape) == 0):\n raise ValueError(\"All dimensions of input must be of equal length\")\n step = 1 + ivy.sum(ivy.cumprod(a.shape[:-1]))\n\n # Write the value out into the diagonal.\n shape = a.shape\n a = ivy.reshape(a, a.size)\n a[:end:step] = val\n a = ivy.reshape(a, shape)\n\n\n@inputs_to_ivy_arrays\ndef put_along_axis(arr, indices, values, axis):\n ivy.put_along_axis(arr, indices, values, axis)\n\n\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset, axis1, axis2):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n", "path": "ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py"}]}
1,369
369
gh_patches_debug_16116
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-1273
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add overlay tiles layers from settings https://github.com/makinacorpus/django-leaflet/issues/83 Would allow to show cadastral parcels over orthophoto, "plans de circulation" etc. </issue> <code> [start of geotrek/settings/dev.py] 1 from .default import * # NOQA 2 3 # 4 # Django Development 5 # .......................... 6 7 DEBUG = True 8 TEMPLATE_DEBUG = True 9 10 SOUTH_TESTS_MIGRATE = False # Tested at settings.tests 11 12 # 13 # Developper Toolbar 14 # .......................... 15 16 INSTALLED_APPS = ( 17 # 'debug_toolbar', 18 'django_extensions', 19 ) + INSTALLED_APPS 20 21 # 22 # Use Geotrek preprod tiles (uses default extent) 23 # ................................................ 24 25 LEAFLET_CONFIG['TILES'] = [ 26 (gettext_noop('Scan'), 'http://{s}.tile.osm.org/{z}/{x}/{y}.png', '(c) OpenStreetMap Contributors'), 27 (gettext_noop('Ortho'), 'http://{s}.tiles.mapbox.com/v3/openstreetmap.map-4wvf9l0l/{z}/{x}/{y}.jpg', '(c) MapBox'), 28 ] 29 LEAFLET_CONFIG['SRID'] = 3857 30 31 LOGGING['loggers']['geotrek']['level'] = 'DEBUG' 32 LOGGING['loggers']['']['level'] = 'DEBUG' 33 [end of geotrek/settings/dev.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geotrek/settings/dev.py b/geotrek/settings/dev.py --- a/geotrek/settings/dev.py +++ b/geotrek/settings/dev.py @@ -19,13 +19,17 @@ ) + INSTALLED_APPS # -# Use Geotrek preprod tiles (uses default extent) -# ................................................ +# Use some default tiles +# .......................... LEAFLET_CONFIG['TILES'] = [ (gettext_noop('Scan'), 'http://{s}.tile.osm.org/{z}/{x}/{y}.png', '(c) OpenStreetMap Contributors'), (gettext_noop('Ortho'), 'http://{s}.tiles.mapbox.com/v3/openstreetmap.map-4wvf9l0l/{z}/{x}/{y}.jpg', '(c) MapBox'), ] +LEAFLET_CONFIG['OVERLAYS'] = [ + (gettext_noop('Coeur de parc'), 'http://{s}.tilestream.makina-corpus.net/v2/coeur-ecrins/{z}/{x}/{y}.png', 'Ecrins'), +] + LEAFLET_CONFIG['SRID'] = 3857 LOGGING['loggers']['geotrek']['level'] = 'DEBUG'
{"golden_diff": "diff --git a/geotrek/settings/dev.py b/geotrek/settings/dev.py\n--- a/geotrek/settings/dev.py\n+++ b/geotrek/settings/dev.py\n@@ -19,13 +19,17 @@\n ) + INSTALLED_APPS\n \n #\n-# Use Geotrek preprod tiles (uses default extent)\n-# ................................................\n+# Use some default tiles\n+# ..........................\n \n LEAFLET_CONFIG['TILES'] = [\n (gettext_noop('Scan'), 'http://{s}.tile.osm.org/{z}/{x}/{y}.png', '(c) OpenStreetMap Contributors'),\n (gettext_noop('Ortho'), 'http://{s}.tiles.mapbox.com/v3/openstreetmap.map-4wvf9l0l/{z}/{x}/{y}.jpg', '(c) MapBox'),\n ]\n+LEAFLET_CONFIG['OVERLAYS'] = [\n+ (gettext_noop('Coeur de parc'), 'http://{s}.tilestream.makina-corpus.net/v2/coeur-ecrins/{z}/{x}/{y}.png', 'Ecrins'),\n+]\n+\n LEAFLET_CONFIG['SRID'] = 3857\n \n LOGGING['loggers']['geotrek']['level'] = 'DEBUG'\n", "issue": "Add overlay tiles layers from settings\nhttps://github.com/makinacorpus/django-leaflet/issues/83 \n\nWould allow to show cadastral parcels over orthophoto, \"plans de circulation\" etc.\n\n", "before_files": [{"content": "from .default import * # NOQA\n\n#\n# Django Development\n# ..........................\n\nDEBUG = True\nTEMPLATE_DEBUG = True\n\nSOUTH_TESTS_MIGRATE = False # Tested at settings.tests\n\n#\n# Developper Toolbar\n# ..........................\n\nINSTALLED_APPS = (\n # 'debug_toolbar',\n 'django_extensions',\n) + INSTALLED_APPS\n\n#\n# Use Geotrek preprod tiles (uses default extent)\n# ................................................\n\nLEAFLET_CONFIG['TILES'] = [\n (gettext_noop('Scan'), 'http://{s}.tile.osm.org/{z}/{x}/{y}.png', '(c) OpenStreetMap Contributors'),\n (gettext_noop('Ortho'), 'http://{s}.tiles.mapbox.com/v3/openstreetmap.map-4wvf9l0l/{z}/{x}/{y}.jpg', '(c) MapBox'),\n]\nLEAFLET_CONFIG['SRID'] = 3857\n\nLOGGING['loggers']['geotrek']['level'] = 'DEBUG'\nLOGGING['loggers']['']['level'] = 'DEBUG'\n", "path": "geotrek/settings/dev.py"}]}
874
273
gh_patches_debug_18592
rasdani/github-patches
git_diff
aio-libs__aiohttp-5121
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> option to disable automatic client response body decompression enhancement for https://github.com/aio-libs/aiohttp/issues/1992 </issue> <code> [start of aiohttp/resolver.py] 1 import asyncio 2 import socket 3 from typing import Any, Dict, List, Optional 4 5 from .abc import AbstractResolver 6 from .helpers import get_running_loop 7 8 __all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver") 9 10 try: 11 import aiodns 12 13 # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname') 14 except ImportError: # pragma: no cover 15 aiodns = None 16 17 aiodns_default = False 18 19 20 class ThreadedResolver(AbstractResolver): 21 """Use Executor for synchronous getaddrinfo() calls, which defaults to 22 concurrent.futures.ThreadPoolExecutor. 23 """ 24 25 def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: 26 self._loop = get_running_loop(loop) 27 28 async def resolve( 29 self, host: str, port: int = 0, family: int = socket.AF_INET 30 ) -> List[Dict[str, Any]]: 31 infos = await self._loop.getaddrinfo( 32 host, port, type=socket.SOCK_STREAM, family=family 33 ) 34 35 hosts = [] 36 for family, _, proto, _, address in infos: 37 if family == socket.AF_INET6 and address[3]: # type: ignore 38 # This is essential for link-local IPv6 addresses. 39 # LL IPv6 is a VERY rare case. Strictly speaking, we should use 40 # getnameinfo() unconditionally, but performance makes sense. 41 host, _port = socket.getnameinfo( 42 address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV 43 ) 44 port = int(_port) 45 else: 46 host, port = address[:2] 47 hosts.append( 48 { 49 "hostname": host, 50 "host": host, 51 "port": port, 52 "family": family, 53 "proto": proto, 54 "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV, 55 } 56 ) 57 58 return hosts 59 60 async def close(self) -> None: 61 pass 62 63 64 class AsyncResolver(AbstractResolver): 65 """Use the `aiodns` package to make asynchronous DNS lookups""" 66 67 def __init__( 68 self, 69 loop: Optional[asyncio.AbstractEventLoop] = None, 70 *args: Any, 71 **kwargs: Any 72 ) -> None: 73 if aiodns is None: 74 raise RuntimeError("Resolver requires aiodns library") 75 76 self._loop = get_running_loop(loop) 77 self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs) 78 79 if not hasattr(self._resolver, "gethostbyname"): 80 # aiodns 1.1 is not available, fallback to DNSResolver.query 81 self.resolve = self._resolve_with_query # type: ignore 82 83 async def resolve( 84 self, host: str, port: int = 0, family: int = socket.AF_INET 85 ) -> List[Dict[str, Any]]: 86 try: 87 resp = await self._resolver.gethostbyname(host, family) 88 except aiodns.error.DNSError as exc: 89 msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed" 90 raise OSError(msg) from exc 91 hosts = [] 92 for address in resp.addresses: 93 hosts.append( 94 { 95 "hostname": host, 96 "host": address, 97 "port": port, 98 "family": family, 99 "proto": 0, 100 "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV, 101 } 102 ) 103 104 if not hosts: 105 raise OSError("DNS lookup failed") 106 107 return hosts 108 109 async def _resolve_with_query( 110 self, host: str, port: int = 0, family: int = socket.AF_INET 111 ) -> List[Dict[str, Any]]: 112 if family == socket.AF_INET6: 113 qtype = "AAAA" 114 else: 115 qtype = "A" 116 117 try: 118 resp = await self._resolver.query(host, qtype) 119 except aiodns.error.DNSError as exc: 120 msg = exc.args[1] if len(exc.args) >= 1 else 
"DNS lookup failed" 121 raise OSError(msg) from exc 122 123 hosts = [] 124 for rr in resp: 125 hosts.append( 126 { 127 "hostname": host, 128 "host": rr.host, 129 "port": port, 130 "family": family, 131 "proto": 0, 132 "flags": socket.AI_NUMERICHOST, 133 } 134 ) 135 136 if not hosts: 137 raise OSError("DNS lookup failed") 138 139 return hosts 140 141 async def close(self) -> None: 142 return self._resolver.cancel() 143 144 145 DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver 146 [end of aiohttp/resolver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py --- a/aiohttp/resolver.py +++ b/aiohttp/resolver.py @@ -26,10 +26,10 @@ self._loop = get_running_loop(loop) async def resolve( - self, host: str, port: int = 0, family: int = socket.AF_INET + self, hostname: str, port: int = 0, family: int = socket.AF_INET ) -> List[Dict[str, Any]]: infos = await self._loop.getaddrinfo( - host, port, type=socket.SOCK_STREAM, family=family + hostname, port, type=socket.SOCK_STREAM, family=family ) hosts = [] @@ -46,7 +46,7 @@ host, port = address[:2] hosts.append( { - "hostname": host, + "hostname": hostname, "host": host, "port": port, "family": family,
{"golden_diff": "diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py\n--- a/aiohttp/resolver.py\n+++ b/aiohttp/resolver.py\n@@ -26,10 +26,10 @@\n self._loop = get_running_loop(loop)\n \n async def resolve(\n- self, host: str, port: int = 0, family: int = socket.AF_INET\n+ self, hostname: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n- host, port, type=socket.SOCK_STREAM, family=family\n+ hostname, port, type=socket.SOCK_STREAM, family=family\n )\n \n hosts = []\n@@ -46,7 +46,7 @@\n host, port = address[:2]\n hosts.append(\n {\n- \"hostname\": host,\n+ \"hostname\": hostname,\n \"host\": host,\n \"port\": port,\n \"family\": family,\n", "issue": "option to disable automatic client response body decompression\nenhancement for https://github.com/aio-libs/aiohttp/issues/1992\n", "before_files": [{"content": "import asyncio\nimport socket\nfrom typing import Any, Dict, List, Optional\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = (\"ThreadedResolver\", \"AsyncResolver\", \"DefaultResolver\")\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:\n self._loop = get_running_loop(loop)\n\n async def resolve(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family\n )\n\n hosts = []\n for family, _, proto, _, address in infos:\n if family == socket.AF_INET6 and address[3]: # type: ignore\n # This is essential for link-local IPv6 addresses.\n # LL IPv6 is a VERY rare case. 
Strictly speaking, we should use\n # getnameinfo() unconditionally, but performance makes sense.\n host, _port = socket.getnameinfo(\n address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV\n )\n port = int(_port)\n else:\n host, port = address[:2]\n hosts.append(\n {\n \"hostname\": host,\n \"host\": host,\n \"port\": port,\n \"family\": family,\n \"proto\": proto,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(\n self,\n loop: Optional[asyncio.AbstractEventLoop] = None,\n *args: Any,\n **kwargs: Any\n ) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop(loop)\n self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)\n\n if not hasattr(self._resolver, \"gethostbyname\"):\n # aiodns 1.1 is not available, fallback to DNSResolver.query\n self.resolve = self._resolve_with_query # type: ignore\n\n async def resolve(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append(\n {\n \"hostname\": host,\n \"host\": address,\n \"port\": port,\n \"family\": family,\n \"proto\": 0,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def _resolve_with_query(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n if family == socket.AF_INET6:\n qtype = \"AAAA\"\n else:\n qtype = \"A\"\n\n try:\n resp = await self._resolver.query(host, qtype)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n\n hosts = []\n for rr in resp:\n hosts.append(\n {\n \"hostname\": host,\n \"host\": rr.host,\n \"port\": port,\n \"family\": family,\n \"proto\": 0,\n \"flags\": socket.AI_NUMERICHOST,\n }\n )\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}]}
1,930
232
gh_patches_debug_7305
rasdani/github-patches
git_diff
streamlink__streamlink-2230
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update dependencies with security vulnerabilities As mentioned here: https://github.com/streamlink/streamlink/pull/2199#issuecomment-447567326 1. urllib3 > Vulnerable versions: < 1.23 > Patched version: 1.23 > urllib3 before version 1.23 does not remove the Authorization HTTP header when following a cross-origin redirect (i.e., a redirect that differs in host, port, or scheme). This can allow for credentials in the Authorization header to be exposed to unintended hosts or transmitted in cleartext. https://nvd.nist.gov/vuln/detail/CVE-2018-20060 2. requests > Vulnerable versions: <= 2.19.1 > Patched version: 2.20.0 > The Requests package through 2.19.1 before 2018-09-14 for Python sends an HTTP Authorization header to an http URI upon receiving a same-hostname https-to-http redirect, which makes it easier for remote attackers to discover credentials by sniffing the network. https://nvd.nist.gov/vuln/detail/CVE-2018-18074 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import codecs 3 from os import environ 4 from os import path 5 from sys import path as sys_path 6 7 from setuptools import setup, find_packages 8 9 import versioneer 10 11 deps = [ 12 # Require backport of concurrent.futures on Python 2 13 'futures;python_version<"3.0"', 14 # Require singledispatch on Python <3.4 15 'singledispatch;python_version<"3.4"', 16 "requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0", 17 'urllib3[secure]<1.23,>=1.21.1;python_version<"3.0"', 18 "isodate", 19 "websocket-client", 20 # Support for SOCKS proxies 21 "PySocks!=1.5.7,>=1.5.6", 22 # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet 23 # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x 24 'win-inet-pton;python_version<"3.0" and platform_system=="Windows"', 25 # shutil.get_terminal_size and which were added in Python 3.3 26 'backports.shutil_which;python_version<"3.3"', 27 'backports.shutil_get_terminal_size;python_version<"3.3"' 28 ] 29 30 # for encrypted streams 31 if environ.get("STREAMLINK_USE_PYCRYPTO"): 32 deps.append("pycrypto") 33 else: 34 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6 35 deps.append("pycryptodome>=3.4.3,<4") 36 37 # for localization 38 if environ.get("STREAMLINK_USE_PYCOUNTRY"): 39 deps.append("pycountry") 40 else: 41 deps.append("iso-639") 42 deps.append("iso3166") 43 44 # When we build an egg for the Win32 bootstrap we don"t want dependency 45 # information built into it. 
46 if environ.get("NO_DEPS"): 47 deps = [] 48 49 this_directory = path.abspath(path.dirname(__file__)) 50 srcdir = path.join(this_directory, "src/") 51 sys_path.insert(0, srcdir) 52 53 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f: 54 long_description = f.read() 55 56 setup(name="streamlink", 57 version=versioneer.get_version(), 58 cmdclass=versioneer.get_cmdclass(), 59 description="Streamlink is command-line utility that extracts streams " 60 "from various services and pipes them into a video player of " 61 "choice.", 62 long_description=long_description, 63 long_description_content_type="text/markdown", 64 url="https://github.com/streamlink/streamlink", 65 project_urls={ 66 "Documentation": "https://streamlink.github.io/", 67 "Tracker": "https://github.com/streamlink/streamlink/issues", 68 "Source": "https://github.com/streamlink/streamlink", 69 "Funding": "https://opencollective.com/streamlink" 70 }, 71 author="Streamlink", 72 # temp until we have a mailing list / global email 73 author_email="[email protected]", 74 license="Simplified BSD", 75 packages=find_packages("src"), 76 package_dir={"": "src"}, 77 entry_points={ 78 "console_scripts": ["streamlink=streamlink_cli.main:main"] 79 }, 80 install_requires=deps, 81 test_suite="tests", 82 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4", 83 classifiers=["Development Status :: 5 - Production/Stable", 84 "License :: OSI Approved :: BSD License", 85 "Environment :: Console", 86 "Intended Audience :: End Users/Desktop", 87 "Operating System :: POSIX", 88 "Operating System :: Microsoft :: Windows", 89 "Operating System :: MacOS", 90 "Programming Language :: Python :: 2.7", 91 "Programming Language :: Python :: 3.4", 92 "Programming Language :: Python :: 3.5", 93 "Programming Language :: Python :: 3.6", 94 "Programming Language :: Python :: 3.7", 95 "Topic :: Internet :: WWW/HTTP", 96 "Topic :: Multimedia :: Sound/Audio", 97 "Topic :: Multimedia :: Video", 98 "Topic :: Utilities"]) 99 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -13,8 +13,8 @@ 'futures;python_version<"3.0"', # Require singledispatch on Python <3.4 'singledispatch;python_version<"3.4"', - "requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0", - 'urllib3[secure]<1.23,>=1.21.1;python_version<"3.0"', + "requests>=2.21.0,<3.0", + 'urllib3[secure]>=1.23;python_version<"3.0"', "isodate", "websocket-client", # Support for SOCKS proxies
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,8 +13,8 @@\n 'futures;python_version<\"3.0\"',\n # Require singledispatch on Python <3.4\n 'singledispatch;python_version<\"3.4\"',\n- \"requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0\",\n- 'urllib3[secure]<1.23,>=1.21.1;python_version<\"3.0\"',\n+ \"requests>=2.21.0,<3.0\",\n+ 'urllib3[secure]>=1.23;python_version<\"3.0\"',\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n", "issue": "Update dependencies with security vulnerabilities\nAs mentioned here:\r\nhttps://github.com/streamlink/streamlink/pull/2199#issuecomment-447567326\r\n\r\n1. urllib3\r\n > Vulnerable versions: < 1.23\r\n > Patched version: 1.23\r\n > urllib3 before version 1.23 does not remove the Authorization HTTP header when following a cross-origin redirect (i.e., a redirect that differs in host, port, or scheme). This can allow for credentials in the Authorization header to be exposed to unintended hosts or transmitted in cleartext.\r\n https://nvd.nist.gov/vuln/detail/CVE-2018-20060\r\n2. requests\r\n > Vulnerable versions: <= 2.19.1\r\n > Patched version: 2.20.0\r\n > The Requests package through 2.19.1 before 2018-09-14 for Python sends an HTTP Authorization header to an http URI upon receiving a same-hostname https-to-http redirect, which makes it easier for remote attackers to discover credentials by sniffing the network.\r\n https://nvd.nist.gov/vuln/detail/CVE-2018-18074\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ\nfrom os import path\nfrom sys import path as sys_path\n\nfrom setuptools import setup, find_packages\n\nimport versioneer\n\ndeps = [\n # Require backport of concurrent.futures on Python 2\n 'futures;python_version<\"3.0\"',\n # Require singledispatch on Python <3.4\n 'singledispatch;python_version<\"3.4\"',\n \"requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0\",\n 'urllib3[secure]<1.23,>=1.21.1;python_version<\"3.0\"',\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet\n # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x\n 'win-inet-pton;python_version<\"3.0\" and platform_system==\"Windows\"',\n # shutil.get_terminal_size and which were added in Python 3.3\n 'backports.shutil_which;python_version<\"3.3\"',\n 'backports.shutil_get_terminal_size;python_version<\"3.3\"'\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n 
description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]}
2,014
241
gh_patches_debug_583
rasdani/github-patches
git_diff
pex-tool__pex-1750
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.85 On the docket: + [x] PEX interpreters should support all underlying Python interpreter options. #1745 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.84" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.84" +__version__ = "2.1.85"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.84\"\n+__version__ = \"2.1.85\"\n", "issue": "Release 2.1.85\nOn the docket:\r\n+ [x] PEX interpreters should support all underlying Python interpreter options. #1745\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.84\"\n", "path": "pex/version.py"}]}
619
97
gh_patches_debug_2856
rasdani/github-patches
git_diff
ResonantGeoData__ResonantGeoData-223
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> _convert_to_cog function doesn't generate COG The current _convert_to_cog function (https://github.com/ResonantGeoData/ResonantGeoData/blob/master/rgd/geodata/models/imagery/subsample.py#L32-L51) doesn't output a COG; it just outputs a tiled tiff file. GDAL has two separate format writers; one for regular TIFF and one for COG. Without the '-of COG' option, the regular tiff writer is invoked. The options available to the two writers differ as well: for COG you can specify a predictor with a string value, for regular tiff you must specify a numeric predictor value. Using lzw compression, I'd recommend the following options: `['-co', 'COMPRESS=LZW', '-co', 'PREDICTOR=YES', '-of', 'COG']`. The default block (tile) size in the COG writer is 512 pixels; some tile servers or consumers seem to prefer 256; if so, add `-co BLOCKSIZE=256`. You can use tiffdump to see that COG files have multiple directories at different resolutions while non-COG files have a single directory. </issue> <code> [start of rgd/geodata/models/imagery/subsample.py] 1 """Tasks for subsampling images with GDAL.""" 2 import os 3 import tempfile 4 5 from celery.utils.log import get_task_logger 6 from django.conf import settings 7 from girder_utils.files import field_file_to_local_path 8 from osgeo import gdal 9 10 from ..common import ArbitraryFile 11 from .base import ConvertedImageFile 12 13 logger = get_task_logger(__name__) 14 15 16 def _gdal_translate(source_field, output_field, **kwargs): 17 workdir = getattr(settings, 'GEODATA_WORKDIR', None) 18 tmpdir = tempfile.mkdtemp(dir=workdir) 19 20 with field_file_to_local_path(source_field) as file_path: 21 logger.info(f'The image file path: {file_path}') 22 output_path = os.path.join(tmpdir, 'subsampled_' + os.path.basename(file_path)) 23 ds = gdal.Open(str(file_path)) 24 ds = gdal.Translate(output_path, ds, **kwargs) 25 ds = None 26 27 output_field.save(os.path.basename(output_path), open(output_path, 'rb')) 28 29 return 30 31 32 def convert_to_cog(cog_id): 33 """Populate ConvertedImageFile with COG file.""" 34 options = [ 35 '-co', 36 'COMPRESS=LZW', 37 '-co', 38 'TILED=YES', 39 ] 40 cog = ConvertedImageFile.objects.get(id=cog_id) 41 cog.converted_file = ArbitraryFile() 42 src = cog.source_image.image_file.imagefile.file 43 output = cog.converted_file.file 44 _gdal_translate(src, output, options=options) 45 cog.converted_file.save() 46 cog.save( 47 update_fields=[ 48 'converted_file', 49 ] 50 ) 51 return 52 [end of rgd/geodata/models/imagery/subsample.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rgd/geodata/models/imagery/subsample.py b/rgd/geodata/models/imagery/subsample.py --- a/rgd/geodata/models/imagery/subsample.py +++ b/rgd/geodata/models/imagery/subsample.py @@ -35,7 +35,11 @@ '-co', 'COMPRESS=LZW', '-co', - 'TILED=YES', + 'PREDICTOR=YES', + '-of', + 'COG', + '-co', + 'BLOCKSIZE=256', ] cog = ConvertedImageFile.objects.get(id=cog_id) cog.converted_file = ArbitraryFile()
{"golden_diff": "diff --git a/rgd/geodata/models/imagery/subsample.py b/rgd/geodata/models/imagery/subsample.py\n--- a/rgd/geodata/models/imagery/subsample.py\n+++ b/rgd/geodata/models/imagery/subsample.py\n@@ -35,7 +35,11 @@\n '-co',\n 'COMPRESS=LZW',\n '-co',\n- 'TILED=YES',\n+ 'PREDICTOR=YES',\n+ '-of',\n+ 'COG',\n+ '-co',\n+ 'BLOCKSIZE=256',\n ]\n cog = ConvertedImageFile.objects.get(id=cog_id)\n cog.converted_file = ArbitraryFile()\n", "issue": "_convert_to_cog function doesn't generate COG\nThe current _convert_to_cog function (https://github.com/ResonantGeoData/ResonantGeoData/blob/master/rgd/geodata/models/imagery/subsample.py#L32-L51) doesn't output a COG; it just outputs a tiled tiff file. GDAL has two separate format writers; one for regular TIFF and one for COG. Without the '-of COG' option, the regular tiff writer is invoked. The options available to the two writers differ as well: for COG you can specify a predictor with a string value, for regular tiff you must specify a numeric predictor value.\r\n\r\nUsing lzw compression, I'd recommend the following options: `['-co', 'COMPRESS=LZW', '-co', 'PREDICTOR=YES', '-of', 'COG']`. The default block (tile) size in the COG writer is 512 pixels; some tile servers or consumers seem to prefer 256; if so, add `-co BLOCKSIZE=256`.\r\n\r\nYou can use tiffdump to see that COG files have multiple directories at different resolutions while non-COG files have a single directory.\r\n\n", "before_files": [{"content": "\"\"\"Tasks for subsampling images with GDAL.\"\"\"\nimport os\nimport tempfile\n\nfrom celery.utils.log import get_task_logger\nfrom django.conf import settings\nfrom girder_utils.files import field_file_to_local_path\nfrom osgeo import gdal\n\nfrom ..common import ArbitraryFile\nfrom .base import ConvertedImageFile\n\nlogger = get_task_logger(__name__)\n\n\ndef _gdal_translate(source_field, output_field, **kwargs):\n workdir = getattr(settings, 'GEODATA_WORKDIR', None)\n tmpdir = tempfile.mkdtemp(dir=workdir)\n\n with field_file_to_local_path(source_field) as file_path:\n logger.info(f'The image file path: {file_path}')\n output_path = os.path.join(tmpdir, 'subsampled_' + os.path.basename(file_path))\n ds = gdal.Open(str(file_path))\n ds = gdal.Translate(output_path, ds, **kwargs)\n ds = None\n\n output_field.save(os.path.basename(output_path), open(output_path, 'rb'))\n\n return\n\n\ndef convert_to_cog(cog_id):\n \"\"\"Populate ConvertedImageFile with COG file.\"\"\"\n options = [\n '-co',\n 'COMPRESS=LZW',\n '-co',\n 'TILED=YES',\n ]\n cog = ConvertedImageFile.objects.get(id=cog_id)\n cog.converted_file = ArbitraryFile()\n src = cog.source_image.image_file.imagefile.file\n output = cog.converted_file.file\n _gdal_translate(src, output, options=options)\n cog.converted_file.save()\n cog.save(\n update_fields=[\n 'converted_file',\n ]\n )\n return\n", "path": "rgd/geodata/models/imagery/subsample.py"}]}
1,264
155
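For reference, the corrected options from the COG issue above boil down to a single `gdal.Translate` call against the dedicated COG driver. The sketch below is illustrative only: the input and output paths are placeholders, and it assumes a GDAL build new enough to ship the COG driver (3.1+).

```
from osgeo import gdal

# Options matching the fix: dedicated COG driver, LZW with predictor,
# and 256-pixel tiles for consumers that prefer smaller blocks.
options = [
    '-co', 'COMPRESS=LZW',
    '-co', 'PREDICTOR=YES',
    '-of', 'COG',
    '-co', 'BLOCKSIZE=256',
]

ds = gdal.Open('input.tif')                     # placeholder input path
out = gdal.Translate('output_cog.tif', ds,      # placeholder output path
                     options=options)
out = None                                      # close to flush the file to disk
```

Running `tiffdump` on the result should then show multiple directories at different resolutions, which is the quick check the issue suggests.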
gh_patches_debug_25458
rasdani/github-patches
git_diff
docker__docker-py-1115
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support create volume Labels options First, check the remote API: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/ ``` { "Name": "tardis", "Labels": { "com.example.some-label": "some-value", "com.example.some-other-label": "some-other-value" }, } ``` It provides `Labels` options. But docker-py doesn't support it right not. Second, in docker-py's document give the following example: ``` volume = cli.create_volume( name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'} docker.errors.APIError: 500 Server Error: Internal Server Error ("create foobar: invalid option key: "foo"") ``` But driver_opts is not working. What kinds of driver_opts can we use? </issue> <code> [start of docker/api/volume.py] 1 from .. import utils 2 3 4 class VolumeApiMixin(object): 5 @utils.minimum_version('1.21') 6 def volumes(self, filters=None): 7 params = { 8 'filters': utils.convert_filters(filters) if filters else None 9 } 10 url = self._url('/volumes') 11 return self._result(self._get(url, params=params), True) 12 13 @utils.minimum_version('1.21') 14 def create_volume(self, name, driver=None, driver_opts=None): 15 url = self._url('/volumes/create') 16 if driver_opts is not None and not isinstance(driver_opts, dict): 17 raise TypeError('driver_opts must be a dictionary') 18 19 data = { 20 'Name': name, 21 'Driver': driver, 22 'DriverOpts': driver_opts, 23 } 24 return self._result(self._post_json(url, data=data), True) 25 26 @utils.minimum_version('1.21') 27 def inspect_volume(self, name): 28 url = self._url('/volumes/{0}', name) 29 return self._result(self._get(url), True) 30 31 @utils.minimum_version('1.21') 32 def remove_volume(self, name): 33 url = self._url('/volumes/{0}', name) 34 resp = self._delete(url) 35 self._raise_for_status(resp) 36 [end of docker/api/volume.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/api/volume.py b/docker/api/volume.py --- a/docker/api/volume.py +++ b/docker/api/volume.py @@ -1,3 +1,4 @@ +from .. import errors from .. import utils @@ -11,7 +12,7 @@ return self._result(self._get(url, params=params), True) @utils.minimum_version('1.21') - def create_volume(self, name, driver=None, driver_opts=None): + def create_volume(self, name, driver=None, driver_opts=None, labels=None): url = self._url('/volumes/create') if driver_opts is not None and not isinstance(driver_opts, dict): raise TypeError('driver_opts must be a dictionary') @@ -21,6 +22,16 @@ 'Driver': driver, 'DriverOpts': driver_opts, } + + if labels is not None: + if utils.compare_version('1.23', self._version) < 0: + raise errors.InvalidVersion( + 'volume labels were introduced in API 1.23' + ) + if not isinstance(labels, dict): + raise TypeError('labels must be a dictionary') + data["Labels"] = labels + return self._result(self._post_json(url, data=data), True) @utils.minimum_version('1.21')
{"golden_diff": "diff --git a/docker/api/volume.py b/docker/api/volume.py\n--- a/docker/api/volume.py\n+++ b/docker/api/volume.py\n@@ -1,3 +1,4 @@\n+from .. import errors\n from .. import utils\n \n \n@@ -11,7 +12,7 @@\n return self._result(self._get(url, params=params), True)\n \n @utils.minimum_version('1.21')\n- def create_volume(self, name, driver=None, driver_opts=None):\n+ def create_volume(self, name, driver=None, driver_opts=None, labels=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n@@ -21,6 +22,16 @@\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n+\n+ if labels is not None:\n+ if utils.compare_version('1.23', self._version) < 0:\n+ raise errors.InvalidVersion(\n+ 'volume labels were introduced in API 1.23'\n+ )\n+ if not isinstance(labels, dict):\n+ raise TypeError('labels must be a dictionary')\n+ data[\"Labels\"] = labels\n+\n return self._result(self._post_json(url, data=data), True)\n \n @utils.minimum_version('1.21')\n", "issue": "Support create volume Labels options\nFirst, check the remote API:\nhttps://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\n\n```\n {\n \"Name\": \"tardis\",\n \"Labels\": {\n \"com.example.some-label\": \"some-value\",\n \"com.example.some-other-label\": \"some-other-value\"\n },\n}\n```\n\nIt provides `Labels` options. But docker-py doesn't support it right not.\n\nSecond, in docker-py's document give the following example:\n\n```\nvolume = cli.create_volume(\n name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'}\n\ndocker.errors.APIError: 500 Server Error: Internal Server Error (\"create foobar: invalid option key: \"foo\"\")\n```\n\nBut driver_opts is not working. What kinds of driver_opts can we use?\n\n", "before_files": [{"content": "from .. import utils\n\n\nclass VolumeApiMixin(object):\n @utils.minimum_version('1.21')\n def volumes(self, filters=None):\n params = {\n 'filters': utils.convert_filters(filters) if filters else None\n }\n url = self._url('/volumes')\n return self._result(self._get(url, params=params), True)\n\n @utils.minimum_version('1.21')\n def create_volume(self, name, driver=None, driver_opts=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n return self._result(self._post_json(url, data=data), True)\n\n @utils.minimum_version('1.21')\n def inspect_volume(self, name):\n url = self._url('/volumes/{0}', name)\n return self._result(self._get(url), True)\n\n @utils.minimum_version('1.21')\n def remove_volume(self, name):\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n", "path": "docker/api/volume.py"}]}
1,063
307
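As a usage sketch only for the volume-labels change above: the volume name and label keys are the ones from the issue, the client class in this era of docker-py was `docker.Client` (renamed `APIClient` in 2.0), and a daemon speaking API 1.23 or newer is assumed.

```
import docker

cli = docker.Client(base_url='unix://var/run/docker.sock', version='1.23')

volume = cli.create_volume(
    name='tardis',
    driver='local',
    # For the built-in 'local' driver the commonly accepted option keys are
    # 'type', 'device' and 'o' (mount options), e.g. a small tmpfs volume:
    driver_opts={'type': 'tmpfs', 'device': 'tmpfs', 'o': 'size=100m'},
    labels={
        'com.example.some-label': 'some-value',
        'com.example.some-other-label': 'some-other-value',
    },
)
# Against a pre-1.23 daemon the patched method raises errors.InvalidVersion instead.
print(volume['Name'], volume.get('Labels'))
```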
gh_patches_debug_25638
rasdani/github-patches
git_diff
beetbox__beets-1138
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> smartplaylist: Respect sort terms in queries It would be nice to be able sort items in smart playlists, like so: ``` smartplaylist: relative_to: ~/music playlist_dir: ~/.mpd/playlists playlists: - name: '2014.m3u' query: 'year:2014 added+' - name: 'imported-%time{$added,%Y-%m}.m3u' query: "added:: path+" ``` I'm unfamiliar with the code but it looks like the smartplaylist plugin [discards the query `sort` field](https://github.com/sampsyo/beets/blob/master/beetsplug/smartplaylist.py#L45) when building playlists. Would it be possible to keep the custom sort? </issue> <code> [start of beetsplug/smartplaylist.py] 1 # This file is part of beets. 2 # Copyright 2013, Dang Mai <[email protected]>. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """Generates smart playlists based on beets queries. 16 """ 17 from __future__ import print_function 18 19 from beets.plugins import BeetsPlugin 20 from beets import config, ui, library 21 from beets import dbcore 22 from beets.util import normpath, syspath 23 import os 24 25 # Global variable so that smartplaylist can detect database changes and run 26 # only once before beets exits. 27 database_changed = False 28 29 30 def _items_for_query(lib, playlist, album=False): 31 """Get the matching items for a playlist's configured queries. 32 `album` indicates whether to process the item-level query or the 33 album-level query (if any). 34 """ 35 key = 'album_query' if album else 'query' 36 if key not in playlist: 37 return [] 38 39 # Parse quer(ies). If it's a list, join the queries with OR. 40 query_strings = playlist[key] 41 if not isinstance(query_strings, (list, tuple)): 42 query_strings = [query_strings] 43 model = library.Album if album else library.Item 44 query = dbcore.OrQuery( 45 [library.parse_query_string(q, model)[0] for q in query_strings] 46 ) 47 48 # Execute query, depending on type. 49 if album: 50 result = [] 51 for album in lib.albums(query): 52 result.extend(album.items()) 53 return result 54 else: 55 return lib.items(query) 56 57 58 def update_playlists(lib): 59 ui.print_("Updating smart playlists...") 60 playlists = config['smartplaylist']['playlists'].get(list) 61 playlist_dir = config['smartplaylist']['playlist_dir'].as_filename() 62 relative_to = config['smartplaylist']['relative_to'].get() 63 if relative_to: 64 relative_to = normpath(relative_to) 65 66 for playlist in playlists: 67 items = [] 68 items.extend(_items_for_query(lib, playlist, True)) 69 items.extend(_items_for_query(lib, playlist, False)) 70 71 m3us = {} 72 basename = playlist['name'].encode('utf8') 73 # As we allow tags in the m3u names, we'll need to iterate through 74 # the items and generate the correct m3u file names. 
75 for item in items: 76 m3u_name = item.evaluate_template(basename, True) 77 if not (m3u_name in m3us): 78 m3us[m3u_name] = [] 79 item_path = item.path 80 if relative_to: 81 item_path = os.path.relpath(item.path, relative_to) 82 if item_path not in m3us[m3u_name]: 83 m3us[m3u_name].append(item_path) 84 # Now iterate through the m3us that we need to generate 85 for m3u in m3us: 86 m3u_path = normpath(os.path.join(playlist_dir, m3u)) 87 with open(syspath(m3u_path), 'w') as f: 88 for path in m3us[m3u]: 89 f.write(path + '\n') 90 ui.print_("... Done") 91 92 93 class SmartPlaylistPlugin(BeetsPlugin): 94 def __init__(self): 95 super(SmartPlaylistPlugin, self).__init__() 96 self.config.add({ 97 'relative_to': None, 98 'playlist_dir': u'.', 99 'auto': True, 100 'playlists': [] 101 }) 102 103 def commands(self): 104 def update(lib, opts, args): 105 update_playlists(lib) 106 spl_update = ui.Subcommand('splupdate', 107 help='update the smart playlists') 108 spl_update.func = update 109 return [spl_update] 110 111 112 @SmartPlaylistPlugin.listen('database_change') 113 def handle_change(lib): 114 global database_changed 115 database_changed = True 116 117 118 @SmartPlaylistPlugin.listen('cli_exit') 119 def update(lib): 120 auto = config['smartplaylist']['auto'] 121 if database_changed and auto: 122 update_playlists(lib) 123 [end of beetsplug/smartplaylist.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/beetsplug/smartplaylist.py b/beetsplug/smartplaylist.py --- a/beetsplug/smartplaylist.py +++ b/beetsplug/smartplaylist.py @@ -18,7 +18,6 @@ from beets.plugins import BeetsPlugin from beets import config, ui, library -from beets import dbcore from beets.util import normpath, syspath import os @@ -36,23 +35,21 @@ if key not in playlist: return [] - # Parse quer(ies). If it's a list, join the queries with OR. + # Parse quer(ies). If it's a list, perform the queries and manually + # concatenate the results query_strings = playlist[key] if not isinstance(query_strings, (list, tuple)): query_strings = [query_strings] model = library.Album if album else library.Item - query = dbcore.OrQuery( - [library.parse_query_string(q, model)[0] for q in query_strings] - ) - - # Execute query, depending on type. - if album: - result = [] - for album in lib.albums(query): - result.extend(album.items()) - return result - else: - return lib.items(query) + results = [] + for q in query_strings: + querystr, sort = library.parse_query_string(q, model) + if album: + new = lib.albums(querystr, sort) + else: + new = lib.items(querystr, sort) + results.extend(new) + return results def update_playlists(lib):
{"golden_diff": "diff --git a/beetsplug/smartplaylist.py b/beetsplug/smartplaylist.py\n--- a/beetsplug/smartplaylist.py\n+++ b/beetsplug/smartplaylist.py\n@@ -18,7 +18,6 @@\n \n from beets.plugins import BeetsPlugin\n from beets import config, ui, library\n-from beets import dbcore\n from beets.util import normpath, syspath\n import os\n \n@@ -36,23 +35,21 @@\n if key not in playlist:\n return []\n \n- # Parse quer(ies). If it's a list, join the queries with OR.\n+ # Parse quer(ies). If it's a list, perform the queries and manually\n+ # concatenate the results\n query_strings = playlist[key]\n if not isinstance(query_strings, (list, tuple)):\n query_strings = [query_strings]\n model = library.Album if album else library.Item\n- query = dbcore.OrQuery(\n- [library.parse_query_string(q, model)[0] for q in query_strings]\n- )\n-\n- # Execute query, depending on type.\n- if album:\n- result = []\n- for album in lib.albums(query):\n- result.extend(album.items())\n- return result\n- else:\n- return lib.items(query)\n+ results = []\n+ for q in query_strings:\n+ querystr, sort = library.parse_query_string(q, model)\n+ if album:\n+ new = lib.albums(querystr, sort)\n+ else:\n+ new = lib.items(querystr, sort)\n+ results.extend(new)\n+ return results\n \n \n def update_playlists(lib):\n", "issue": "smartplaylist: Respect sort terms in queries\nIt would be nice to be able sort items in smart playlists, like so:\n\n```\nsmartplaylist:\n relative_to: ~/music\n playlist_dir: ~/.mpd/playlists\n playlists:\n - name: '2014.m3u'\n query: 'year:2014 added+'\n - name: 'imported-%time{$added,%Y-%m}.m3u'\n query: \"added:: path+\"\n```\n\nI'm unfamiliar with the code but it looks like the smartplaylist plugin [discards the query `sort` field](https://github.com/sampsyo/beets/blob/master/beetsplug/smartplaylist.py#L45) when building playlists. Would it be possible to keep the custom sort?\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Dang Mai <[email protected]>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Generates smart playlists based on beets queries.\n\"\"\"\nfrom __future__ import print_function\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import config, ui, library\nfrom beets import dbcore\nfrom beets.util import normpath, syspath\nimport os\n\n# Global variable so that smartplaylist can detect database changes and run\n# only once before beets exits.\ndatabase_changed = False\n\n\ndef _items_for_query(lib, playlist, album=False):\n \"\"\"Get the matching items for a playlist's configured queries.\n `album` indicates whether to process the item-level query or the\n album-level query (if any).\n \"\"\"\n key = 'album_query' if album else 'query'\n if key not in playlist:\n return []\n\n # Parse quer(ies). 
If it's a list, join the queries with OR.\n query_strings = playlist[key]\n if not isinstance(query_strings, (list, tuple)):\n query_strings = [query_strings]\n model = library.Album if album else library.Item\n query = dbcore.OrQuery(\n [library.parse_query_string(q, model)[0] for q in query_strings]\n )\n\n # Execute query, depending on type.\n if album:\n result = []\n for album in lib.albums(query):\n result.extend(album.items())\n return result\n else:\n return lib.items(query)\n\n\ndef update_playlists(lib):\n ui.print_(\"Updating smart playlists...\")\n playlists = config['smartplaylist']['playlists'].get(list)\n playlist_dir = config['smartplaylist']['playlist_dir'].as_filename()\n relative_to = config['smartplaylist']['relative_to'].get()\n if relative_to:\n relative_to = normpath(relative_to)\n\n for playlist in playlists:\n items = []\n items.extend(_items_for_query(lib, playlist, True))\n items.extend(_items_for_query(lib, playlist, False))\n\n m3us = {}\n basename = playlist['name'].encode('utf8')\n # As we allow tags in the m3u names, we'll need to iterate through\n # the items and generate the correct m3u file names.\n for item in items:\n m3u_name = item.evaluate_template(basename, True)\n if not (m3u_name in m3us):\n m3us[m3u_name] = []\n item_path = item.path\n if relative_to:\n item_path = os.path.relpath(item.path, relative_to)\n if item_path not in m3us[m3u_name]:\n m3us[m3u_name].append(item_path)\n # Now iterate through the m3us that we need to generate\n for m3u in m3us:\n m3u_path = normpath(os.path.join(playlist_dir, m3u))\n with open(syspath(m3u_path), 'w') as f:\n for path in m3us[m3u]:\n f.write(path + '\\n')\n ui.print_(\"... Done\")\n\n\nclass SmartPlaylistPlugin(BeetsPlugin):\n def __init__(self):\n super(SmartPlaylistPlugin, self).__init__()\n self.config.add({\n 'relative_to': None,\n 'playlist_dir': u'.',\n 'auto': True,\n 'playlists': []\n })\n\n def commands(self):\n def update(lib, opts, args):\n update_playlists(lib)\n spl_update = ui.Subcommand('splupdate',\n help='update the smart playlists')\n spl_update.func = update\n return [spl_update]\n\n\[email protected]('database_change')\ndef handle_change(lib):\n global database_changed\n database_changed = True\n\n\[email protected]('cli_exit')\ndef update(lib):\n auto = config['smartplaylist']['auto']\n if database_changed and auto:\n update_playlists(lib)\n", "path": "beetsplug/smartplaylist.py"}]}
1,941
368
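The heart of the smartplaylist fix is simply that `parse_query_string` already returns a `(query, sort)` pair and the sort only needs to be forwarded. A minimal sketch, assuming `lib` is an open beets `Library` and reusing the `year:2014 added+` query from the issue:

```
from beets import library

querystr, sort = library.parse_query_string('year:2014 added+', library.Item)

# Forwarding the sort keeps the `added+` ordering in the playlist output;
# the old code discarded it and wrapped only the query in an OrQuery.
for item in lib.items(querystr, sort):
    print(item.path)
```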
gh_patches_debug_17584
rasdani/github-patches
git_diff
geopandas__geopandas-379
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ENH: preserve column order in read_file Idea from http://gis.stackexchange.com/questions/216963/preserve-column-order-of-geopandas-file-read/217084#217084 For shapefiles, fiona saves the properties in the `meta` attribute as an OrderedDict, so we should be able to get the actual order of the columns in the file from that and rearrange the columns of the output accordingly. </issue> <code> [start of geopandas/io/file.py] 1 import os 2 3 import fiona 4 import numpy as np 5 from shapely.geometry import mapping 6 7 from six import iteritems 8 from geopandas import GeoDataFrame 9 10 11 def read_file(filename, **kwargs): 12 """ 13 Returns a GeoDataFrame from a file. 14 15 *filename* is either the absolute or relative path to the file to be 16 opened and *kwargs* are keyword args to be passed to the `open` method 17 in the fiona library when opening the file. For more information on 18 possible keywords, type: ``import fiona; help(fiona.open)`` 19 """ 20 bbox = kwargs.pop('bbox', None) 21 with fiona.open(filename, **kwargs) as f: 22 crs = f.crs 23 if bbox is not None: 24 assert len(bbox)==4 25 f_filt = f.filter(bbox=bbox) 26 else: 27 f_filt = f 28 gdf = GeoDataFrame.from_features(f_filt, crs=crs) 29 30 return gdf 31 32 33 def to_file(df, filename, driver="ESRI Shapefile", schema=None, 34 **kwargs): 35 """ 36 Write this GeoDataFrame to an OGR data source 37 38 A dictionary of supported OGR providers is available via: 39 >>> import fiona 40 >>> fiona.supported_drivers 41 42 Parameters 43 ---------- 44 df : GeoDataFrame to be written 45 filename : string 46 File path or file handle to write to. 47 driver : string, default 'ESRI Shapefile' 48 The OGR format driver used to write the vector file. 49 schema : dict, default None 50 If specified, the schema dictionary is passed to Fiona to 51 better control how the file is written. If None, GeoPandas 52 will determine the schema based on each column's dtype 53 54 The *kwargs* are passed to fiona.open and can be used to write 55 to multi-layer data, store data within archives (zip files), etc. 56 """ 57 if schema is None: 58 schema = infer_schema(df) 59 filename = os.path.abspath(os.path.expanduser(filename)) 60 with fiona.open(filename, 'w', driver=driver, crs=df.crs, 61 schema=schema, **kwargs) as c: 62 for feature in df.iterfeatures(): 63 c.write(feature) 64 65 66 def infer_schema(df): 67 try: 68 from collections import OrderedDict 69 except ImportError: 70 from ordereddict import OrderedDict 71 72 def convert_type(in_type): 73 if in_type == object: 74 return 'str' 75 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__ 76 if out_type == 'long': 77 out_type = 'int' 78 return out_type 79 80 properties = OrderedDict([ 81 (col, convert_type(_type)) for col, _type in 82 zip(df.columns, df.dtypes) if col != df._geometry_column_name 83 ]) 84 85 geom_type = _common_geom_type(df) 86 if not geom_type: 87 raise ValueError("Geometry column cannot contain mutiple " 88 "geometry types when writing to file.") 89 90 schema = {'geometry': geom_type, 'properties': properties} 91 92 return schema 93 94 95 def _common_geom_type(df): 96 # Need to check geom_types before we write to file... 97 # Some (most?) 
providers expect a single geometry type: 98 # Point, LineString, or Polygon 99 geom_types = df.geometry.geom_type.unique() 100 101 from os.path import commonprefix # To find longest common prefix 102 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse 103 if not geom_type: 104 geom_type = None 105 106 return geom_type 107 [end of geopandas/io/file.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geopandas/io/file.py b/geopandas/io/file.py --- a/geopandas/io/file.py +++ b/geopandas/io/file.py @@ -14,7 +14,7 @@ *filename* is either the absolute or relative path to the file to be opened and *kwargs* are keyword args to be passed to the `open` method - in the fiona library when opening the file. For more information on + in the fiona library when opening the file. For more information on possible keywords, type: ``import fiona; help(fiona.open)`` """ bbox = kwargs.pop('bbox', None) @@ -27,6 +27,10 @@ f_filt = f gdf = GeoDataFrame.from_features(f_filt, crs=crs) + # re-order with column order from metadata, with geometry last + columns = list(f.meta["schema"]["properties"]) + ["geometry"] + gdf = gdf[columns] + return gdf
{"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -14,7 +14,7 @@\n \n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n- in the fiona library when opening the file. For more information on \n+ in the fiona library when opening the file. For more information on\n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n@@ -27,6 +27,10 @@\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n \n+ # re-order with column order from metadata, with geometry last\n+ columns = list(f.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n+ gdf = gdf[columns]\n+\n return gdf\n", "issue": "ENH: preserve column order in read_file\nIdea from http://gis.stackexchange.com/questions/216963/preserve-column-order-of-geopandas-file-read/217084#217084\r\n\r\nFor shapefiles, fiona saves the properties in the `meta` attribute as an OrderedDict, so we should be able to get the actual order of the columns in the file from that and rearrange the columns of the output accordingly.\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n in the fiona library when opening the file. For more information on \n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox)==4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. 
If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as c:\n for feature in df.iterfeatures():\n c.write(feature)\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}]}
1,643
233
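Outside of `read_file`, the reordering trick from the geopandas patch can be seen in isolation; the shapefile name below is a placeholder.

```
import fiona
from geopandas import GeoDataFrame

with fiona.open('roads.shp') as f:            # placeholder path
    gdf = GeoDataFrame.from_features(f, crs=f.crs)
    # Shapefile properties come back as an OrderedDict, so the schema
    # preserves the on-disk column order; geometry is appended last.
    columns = list(f.meta['schema']['properties']) + ['geometry']

gdf = gdf[columns]
```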
gh_patches_debug_949
rasdani/github-patches
git_diff
scikit-hep__pyhf-457
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Determine if papermill v1.0 API change is a problem # Description The [papermill `v1.0` release will introduce API breaking changes](https://github.com/nteract/papermill/blob/d554193bc458797b63af1f94964883d5dcca2418/README.md). It would be good to determine if these changes will matter for pyhf testing and require the addition of [scrapbook](https://nteract-scrapbook.readthedocs.io/en/latest/) or if the API change doesn't affect pyhf. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 from os import path 5 import sys 6 7 this_directory = path.abspath(path.dirname(__file__)) 8 if sys.version_info.major < 3: 9 from io import open 10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md: 11 long_description = readme_md.read() 12 13 extras_require = { 14 'tensorflow': [ 15 'tensorflow~=1.13', 16 'tensorflow-probability~=0.5', 17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass 18 'setuptools<=39.1.0', 19 ], 20 'torch': ['torch~=1.0'], 21 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'], 22 # 'dask': [ 23 # 'dask[array]' 24 # ], 25 'xmlio': ['uproot'], 26 'minuit': ['iminuit'], 27 'develop': [ 28 'pyflakes', 29 'pytest~=3.5', 30 'pytest-cov>=2.5.1', 31 'pytest-mock', 32 'pytest-benchmark[histogram]', 33 'pytest-console-scripts', 34 'python-coveralls', 35 'coverage>=4.0', # coveralls 36 'matplotlib', 37 'jupyter', 38 'nbdime', 39 'uproot~=3.3', 40 'papermill~=0.16', 41 'graphviz', 42 'bumpversion', 43 'sphinx', 44 'sphinxcontrib-bibtex', 45 'sphinxcontrib-napoleon', 46 'sphinx_rtd_theme', 47 'nbsphinx', 48 'sphinx-issues', 49 'm2r', 50 'jsonpatch', 51 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now 52 'pre-commit', 53 'black;python_version>="3.6"', # Black is Python3 only 54 'twine', 55 ], 56 } 57 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 58 59 60 def _is_test_pypi(): 61 """ 62 Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and 63 set to true (c.f. .travis.yml) 64 65 The use_scm_version kwarg accepts a callable for the local_scheme 66 configuration parameter with argument "version". This can be replaced 67 with a lambda as the desired version structure is {next_version}.dev{distance} 68 c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy 69 70 As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version 71 controlled through bumpversion is used. 
72 """ 73 from os import getenv 74 75 return ( 76 {'local_scheme': lambda version: ''} 77 if getenv('TESTPYPI_UPLOAD') == 'true' 78 else False 79 ) 80 81 82 setup( 83 name='pyhf', 84 version='0.1.0', 85 description='(partial) pure python histfactory implementation', 86 long_description=long_description, 87 long_description_content_type='text/markdown', 88 url='https://github.com/diana-hep/pyhf', 89 author='Lukas Heinrich', 90 author_email='[email protected]', 91 license='Apache', 92 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask', 93 classifiers=[ 94 "Programming Language :: Python :: 2", 95 "Programming Language :: Python :: 2.7", 96 "Programming Language :: Python :: 3", 97 "Programming Language :: Python :: 3.6", 98 "Programming Language :: Python :: 3.7", 99 ], 100 packages=find_packages(), 101 include_package_data=True, 102 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", 103 install_requires=[ 104 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet 105 'click>=6.0', # for console scripts, 106 'tqdm', # for readxml 107 'six', # for modifiers 108 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 109 'jsonpatch', 110 ], 111 extras_require=extras_require, 112 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, 113 dependency_links=[], 114 use_scm_version=_is_test_pypi(), 115 ) 116 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -37,7 +37,8 @@ 'jupyter', 'nbdime', 'uproot~=3.3', - 'papermill~=0.16', + 'papermill~=1.0', + 'nteract-scrapbook~=0.2', 'graphviz', 'bumpversion', 'sphinx',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,8 @@\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n- 'papermill~=0.16',\n+ 'papermill~=1.0',\n+ 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n", "issue": "Determine if papermill v1.0 API change is a problem\n# Description\r\n\r\nThe [papermill `v1.0` release will introduce API breaking changes](https://github.com/nteract/papermill/blob/d554193bc458797b63af1f94964883d5dcca2418/README.md). It would be good to determine if these changes will matter for pyhf testing and require the addition of [scrapbook](https://nteract-scrapbook.readthedocs.io/en/latest/) or if the API change doesn't affect pyhf.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. 
https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.1.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}]}
1,946
104
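For context on the API break behind the pyhf pin change: papermill 1.0 drops the `pm.record`/`pm.read_notebook` data-passing helpers, which moved into nteract-scrapbook (hence the second dependency added in the diff). A rough sketch of the replacement API, with an invented notebook path and scrap name:

```
import scrapbook as sb

# Inside the executed notebook: persist a value on the notebook itself.
sb.glue("n_events", 123)

# Back on the test/driver side: read the scrap out of the output notebook.
nb = sb.read_notebook("output.ipynb")
print(nb.scraps["n_events"].data)   # -> 123
```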
gh_patches_debug_214
rasdani/github-patches
git_diff
DDMAL__CantusDB-328
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Representation of Notation objects When I recently tried to edit a source, I was presented with an error message, and found that I was missing several required fields, including this one: ![Screen Shot 2022-09-06 at 08 59 56](https://user-images.githubusercontent.com/58090591/188641630-13f87954-0f92-405f-a561-effe3075a3db.png) Notation objects are currently pretty inscrutable. They should be represented such that at least their `name` property is visible. Larger question: why do we have notation objects at all? Currently, the notation model has only one property: `name`. Could this information in Source objects not be more simply represented by a CharField? Is using Notation objects simply the way things were done in OldCantus? Are we using them to ensure standardization among multiple Sources? </issue> <code> [start of django/cantusdb_project/main_app/models/notation.py] 1 from django.db import models 2 from main_app.models import BaseModel 3 4 5 class Notation(BaseModel): 6 name = models.CharField(max_length=63) 7 [end of django/cantusdb_project/main_app/models/notation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/models/notation.py b/django/cantusdb_project/main_app/models/notation.py --- a/django/cantusdb_project/main_app/models/notation.py +++ b/django/cantusdb_project/main_app/models/notation.py @@ -4,3 +4,5 @@ class Notation(BaseModel): name = models.CharField(max_length=63) + def __str__(self): + return f"{self.name} ({self.id})" \ No newline at end of file
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/notation.py b/django/cantusdb_project/main_app/models/notation.py\n--- a/django/cantusdb_project/main_app/models/notation.py\n+++ b/django/cantusdb_project/main_app/models/notation.py\n@@ -4,3 +4,5 @@\n \n class Notation(BaseModel):\n name = models.CharField(max_length=63)\n+ def __str__(self):\n+ return f\"{self.name} ({self.id})\"\n\\ No newline at end of file\n", "issue": "Representation of Notation objects\nWhen I recently tried to edit a source, I was presented with an error message, and found that I was missing several required fields, including this one: ![Screen Shot 2022-09-06 at 08 59 56](https://user-images.githubusercontent.com/58090591/188641630-13f87954-0f92-405f-a561-effe3075a3db.png)\r\nNotation objects are currently pretty inscrutable. They should be represented such that at least their `name` property is visible.\r\n\r\nLarger question: why do we have notation objects at all? Currently, the notation model has only one property: `name`. Could this information in Source objects not be more simply represented by a CharField? Is using Notation objects simply the way things were done in OldCantus? Are we using them to ensure standardization among multiple Sources?\n", "before_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel\n\n\nclass Notation(BaseModel):\n name = models.CharField(max_length=63)\n", "path": "django/cantusdb_project/main_app/models/notation.py"}]}
807
121
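With the added `__str__`, a hypothetical notation record (name and id invented here) renders with both its name and primary key, which is what then shows up in the source edit form instead of the opaque object label from the screenshot:

```
notation = Notation(name="Square notation", id=42)   # hypothetical values
str(notation)                                        # -> "Square notation (42)"
```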
gh_patches_debug_16631
rasdani/github-patches
git_diff
google__osv.dev-986
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The purl of Alpine ecosystem is inconsistent with purl-spec According to [**purl-type `apk`**](https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst#apk) ([PR](https://github.com/package-url/purl-spec/pull/171)), it seems that the purl type of `Alpine` should be `apk` and the purl namespace can be `alpine`. In this project, however, the purl type of `Alpine` is `alpine`. As a result, the purl of a package is **different** when apply different standards. For example, the purl of the `curl` package is `pkg:apk/alpine/curl` according to purl-spec while it is `pkg:alpine/curl` in this project. </issue> <code> [start of osv/purl_helpers.py] 1 # Copyright 2022 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """PURL conversion utilities.""" 15 16 from urllib.parse import quote 17 18 PURL_ECOSYSTEMS = { 19 'crates.io': 'cargo', 20 'Debian': 'deb', 21 'Hex': 'hex', 22 'Go': 'golang', 23 'Maven': 'maven', 24 'NuGet': 'nuget', 25 'npm': 'npm', 26 'Packagist': 'composer', 27 'OSS-Fuzz': 'generic', 28 'PyPI': 'pypi', 29 'RubyGems': 'gem', 30 } 31 32 33 def _url_encode(package_name): 34 """URL encode a PURL `namespace/name` or `name`.""" 35 parts = package_name.split('/') 36 return '/'.join(quote(p) for p in parts) 37 38 39 def package_to_purl(ecosystem, package_name): 40 """Convert a ecosystem and package name to PURL.""" 41 purl_type = PURL_ECOSYSTEMS.get(ecosystem) 42 if not purl_type: 43 return None 44 45 suffix = '' 46 47 if purl_type == 'maven': 48 # PURLs use / to separate the group ID and the artifact ID. 49 package_name = package_name.replace(':', '/', 1) 50 51 if purl_type == 'deb': 52 package_name = 'debian/' + package_name 53 suffix = '?arch=source' 54 55 return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}' 56 [end of osv/purl_helpers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/osv/purl_helpers.py b/osv/purl_helpers.py --- a/osv/purl_helpers.py +++ b/osv/purl_helpers.py @@ -16,6 +16,7 @@ from urllib.parse import quote PURL_ECOSYSTEMS = { + 'Alpine': 'apk', 'crates.io': 'cargo', 'Debian': 'deb', 'Hex': 'hex', @@ -48,8 +49,12 @@ # PURLs use / to separate the group ID and the artifact ID. package_name = package_name.replace(':', '/', 1) - if purl_type == 'deb': + if purl_type == 'deb' and ecosystem == 'Debian': package_name = 'debian/' + package_name suffix = '?arch=source' + if purl_type == 'apk' and ecosystem == 'Alpine': + package_name = 'alpine/' + package_name + suffix = '?arch=source' + return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'
{"golden_diff": "diff --git a/osv/purl_helpers.py b/osv/purl_helpers.py\n--- a/osv/purl_helpers.py\n+++ b/osv/purl_helpers.py\n@@ -16,6 +16,7 @@\n from urllib.parse import quote\n \n PURL_ECOSYSTEMS = {\n+ 'Alpine': 'apk',\n 'crates.io': 'cargo',\n 'Debian': 'deb',\n 'Hex': 'hex',\n@@ -48,8 +49,12 @@\n # PURLs use / to separate the group ID and the artifact ID.\n package_name = package_name.replace(':', '/', 1)\n \n- if purl_type == 'deb':\n+ if purl_type == 'deb' and ecosystem == 'Debian':\n package_name = 'debian/' + package_name\n suffix = '?arch=source'\n \n+ if purl_type == 'apk' and ecosystem == 'Alpine':\n+ package_name = 'alpine/' + package_name\n+ suffix = '?arch=source'\n+\n return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'\n", "issue": "The purl of Alpine ecosystem is inconsistent with purl-spec\nAccording to [**purl-type `apk`**](https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst#apk) ([PR](https://github.com/package-url/purl-spec/pull/171)), it seems that the purl type of `Alpine` should be `apk` and the purl namespace can be `alpine`. In this project, however, the purl type of `Alpine` is `alpine`.\r\n\r\nAs a result, the purl of a package is **different** when apply different standards. For example, the purl of the `curl` package is `pkg:apk/alpine/curl` according to purl-spec while it is `pkg:alpine/curl` in this project.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PURL conversion utilities.\"\"\"\n\nfrom urllib.parse import quote\n\nPURL_ECOSYSTEMS = {\n 'crates.io': 'cargo',\n 'Debian': 'deb',\n 'Hex': 'hex',\n 'Go': 'golang',\n 'Maven': 'maven',\n 'NuGet': 'nuget',\n 'npm': 'npm',\n 'Packagist': 'composer',\n 'OSS-Fuzz': 'generic',\n 'PyPI': 'pypi',\n 'RubyGems': 'gem',\n}\n\n\ndef _url_encode(package_name):\n \"\"\"URL encode a PURL `namespace/name` or `name`.\"\"\"\n parts = package_name.split('/')\n return '/'.join(quote(p) for p in parts)\n\n\ndef package_to_purl(ecosystem, package_name):\n \"\"\"Convert a ecosystem and package name to PURL.\"\"\"\n purl_type = PURL_ECOSYSTEMS.get(ecosystem)\n if not purl_type:\n return None\n\n suffix = ''\n\n if purl_type == 'maven':\n # PURLs use / to separate the group ID and the artifact ID.\n package_name = package_name.replace(':', '/', 1)\n\n if purl_type == 'deb':\n package_name = 'debian/' + package_name\n suffix = '?arch=source'\n\n return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'\n", "path": "osv/purl_helpers.py"}]}
num_tokens_prompt: 1,257
num_tokens_diff: 245
problem_id: gh_patches_debug_15071
source: rasdani/github-patches
task_type: git_diff
in_source_id: PlasmaPy__PlasmaPy-688
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update acknowledgements to include NSF CSSI grant Good news, everyone! The NSF CSSI proposal that we submitted has officially been awarded! At the request of NSF program manager Slava Lukin, we should update our acknowledgements to include this grant and reflect the support from NSF. I'll assign myself to do this when the grant officially starts (which appears to have been moved up to October 1, 2019). The things that we need to do include: - [x] Revise the sentence that we ask people to include in the acknowledgements section of their papers to reflect funding from NSF - [x] Add an acknowledgements section at the end of `README.md` - [x] Update `CITATION.md` - [x] Update `docs/about/credits.rst` - [x] Update `plasmapy.__citation__` Another thing peripherally related to this issue is to: - [x] Update the [proposal](https://doi.org/10.5281/zenodo.2633286) that we archived on Zenodo to include the answers to questions we had to send in and the panel's comments I'll assign this to myself. </issue> <code> [start of plasmapy/__init__.py] 1 """ 2 PlasmaPy: A plasma physics Python package 3 ================================================ 4 5 Documentation is available in the docstrings, 6 online at https://docs.plasmapy.org (accessible also using 7 the ``plasmapy.online_help`` function). 8 9 Contents 10 -------- 11 PlasmaPy provides the following functionality: 12 13 Subpackages 14 ----------- 15 Each of these subpackages requires an explicit import, for example, 16 via ``import plasmapy.physics``. 17 18 :: 19 20 atomic --- Database for atoms, isotopes, ions... 21 classes --- (WIP) classes used in multiple places 22 data --- Data used for testing and examples 23 diagnostics --- Experimental research data analysis 24 mathematics --- General formulae used elsewhere 25 physics --- Plasma theory functionality 26 transport --- Transport theory functionality 27 utils --- Various utilities 28 29 Utility tools 30 ------------- 31 :: 32 33 test --- Run PlasmaPy unit tests 34 online_help --- Search the online documentation 35 __version__ --- PlasmaPy version string 36 __citation__ --- PlasmaPy citation template 37 38 """ 39 # Licensed under a 3-clause BSD style license - see LICENSE.rst 40 41 # Packages may add whatever they like to this file, but 42 # should keep this content at the top. 43 # ---------------------------------------------------------------------------- 44 from ._base_init import * 45 # ---------------------------------------------------------------------------- 46 47 # Enforce Python version check during package import. 48 # This is the same check as the one at the top of setup.py 49 import sys 50 51 __name__ = "plasmapy" 52 53 if sys.version_info < tuple((int(val) for val in "3.6".split('.'))): 54 raise Exception("plasmapy does not support Python < {}".format(3.6)) 55 56 57 def online_help(query): 58 """ 59 Search the online PlasmaPy documentation for the given query from plasmapy.org 60 Opens the results in the default web browser. 61 Requires an active Internet connection. 62 Redirects to Astropy.units in case of query 'unit' or 'units' 63 64 Parameters 65 ---------- 66 query : str 67 The search query. 68 """ 69 from urllib.parse import urlencode 70 import webbrowser 71 72 url = ('http://docs.plasmapy.org/en/stable/search.html?' 
73 '{0}&check_keywords=yes&area=default').format(urlencode({'q': query})) 74 75 if(query.lower() in ('unit', 'units')): 76 url = 'http://docs.astropy.org/en/stable/units/' 77 78 webbrowser.open(url) 79 80 81 __citation__ = [ 82 "https://doi.org/10.5281/zenodo.1238132", 83 "https://doi.org/10.5281/zenodo.3235817", 84 ] 85 [end of plasmapy/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py --- a/plasmapy/__init__.py +++ b/plasmapy/__init__.py @@ -50,8 +50,13 @@ __name__ = "plasmapy" +__citation__ = ( + "Instructions on how to cite and acknowledge PlasmaPy are provided in the " + "online documentation at: http://docs.plasmapy.org/en/latest/about/citation.html" +) + if sys.version_info < tuple((int(val) for val in "3.6".split('.'))): - raise Exception("plasmapy does not support Python < {}".format(3.6)) + raise Exception("PlasmaPy does not support Python < {}".format(3.6)) def online_help(query): @@ -76,9 +81,3 @@ url = 'http://docs.astropy.org/en/stable/units/' webbrowser.open(url) - - -__citation__ = [ - "https://doi.org/10.5281/zenodo.1238132", - "https://doi.org/10.5281/zenodo.3235817", -]
{"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -50,8 +50,13 @@\n \n __name__ = \"plasmapy\"\n \n+__citation__ = (\n+ \"Instructions on how to cite and acknowledge PlasmaPy are provided in the \"\n+ \"online documentation at: http://docs.plasmapy.org/en/latest/about/citation.html\"\n+)\n+\n if sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n- raise Exception(\"plasmapy does not support Python < {}\".format(3.6))\n+ raise Exception(\"PlasmaPy does not support Python < {}\".format(3.6))\n \n \n def online_help(query):\n@@ -76,9 +81,3 @@\n url = 'http://docs.astropy.org/en/stable/units/'\n \n webbrowser.open(url)\n-\n-\n-__citation__ = [\n- \"https://doi.org/10.5281/zenodo.1238132\",\n- \"https://doi.org/10.5281/zenodo.3235817\",\n-]\n", "issue": "Update acknowledgements to include NSF CSSI grant\nGood news, everyone! The NSF CSSI proposal that we submitted has officially been awarded! At the request of NSF program manager Slava Lukin, we should update our acknowledgements to include this grant and reflect the support from NSF. I'll assign myself to do this when the grant officially starts (which appears to have been moved up to October 1, 2019). The things that we need to do include:\r\n\r\n - [x] Revise the sentence that we ask people to include in the acknowledgements section of their papers to reflect funding from NSF \r\n - [x] Add an acknowledgements section at the end of `README.md`\r\n - [x] Update `CITATION.md`\r\n - [x] Update `docs/about/credits.rst`\r\n - [x] Update `plasmapy.__citation__`\r\n\r\nAnother thing peripherally related to this issue is to:\r\n\r\n - [x] Update the [proposal](https://doi.org/10.5281/zenodo.2633286) that we archived on Zenodo to include the answers to questions we had to send in and the panel's comments\r\n\r\nI'll assign this to myself. 
\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nPlasmaPy: A plasma physics Python package\n================================================\n\nDocumentation is available in the docstrings,\nonline at https://docs.plasmapy.org (accessible also using\nthe ``plasmapy.online_help`` function).\n\nContents\n--------\nPlasmaPy provides the following functionality:\n\nSubpackages\n-----------\nEach of these subpackages requires an explicit import, for example,\nvia ``import plasmapy.physics``.\n\n::\n\n atomic --- Database for atoms, isotopes, ions...\n classes --- (WIP) classes used in multiple places\n data --- Data used for testing and examples\n diagnostics --- Experimental research data analysis\n mathematics --- General formulae used elsewhere\n physics --- Plasma theory functionality\n transport --- Transport theory functionality\n utils --- Various utilities\n\nUtility tools\n-------------\n::\n\n test --- Run PlasmaPy unit tests\n online_help --- Search the online documentation\n __version__ --- PlasmaPy version string\n __citation__ --- PlasmaPy citation template\n\n\"\"\"\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nfrom ._base_init import *\n# ----------------------------------------------------------------------------\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\n__name__ = \"plasmapy\"\n\nif sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n raise Exception(\"plasmapy does not support Python < {}\".format(3.6))\n\n\ndef online_help(query):\n \"\"\"\n Search the online PlasmaPy documentation for the given query from plasmapy.org\n Opens the results in the default web browser.\n Requires an active Internet connection.\n Redirects to Astropy.units in case of query 'unit' or 'units'\n\n Parameters\n ----------\n query : str\n The search query.\n \"\"\"\n from urllib.parse import urlencode\n import webbrowser\n\n url = ('http://docs.plasmapy.org/en/stable/search.html?'\n '{0}&check_keywords=yes&area=default').format(urlencode({'q': query}))\n\n if(query.lower() in ('unit', 'units')):\n url = 'http://docs.astropy.org/en/stable/units/'\n\n webbrowser.open(url)\n\n\n__citation__ = [\n \"https://doi.org/10.5281/zenodo.1238132\",\n \"https://doi.org/10.5281/zenodo.3235817\",\n]\n", "path": "plasmapy/__init__.py"}]}
num_tokens_prompt: 1,548
num_tokens_diff: 285
problem_id: gh_patches_debug_26065
source: rasdani/github-patches
task_type: git_diff
in_source_id: iterative__dvc-2743
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dvc: .dvcignore trouble with nfs mounted directory I have a large NFS mounted in a directory that I would like dvc to ignore. Directory Structure: ``` directory |___nfs |___... |___.dvc |___.dvcignore ``` My *.dvcignore* has the following line: `/nfs/` (I've tried `nfs/` and `nfs/*`) The problem is that when I run `dvc status` or `dvc pull` the processes will just hang: ``` DEBUG: PRAGMA user_version; DEBUG: fetched: [(3,)] DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL) DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER) DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL) DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info) DEBUG: PRAGMA user_version = 3; ``` Here is the traceback from `KeyboardInterrupt`: ``` File "/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/repo/__init__.py", line 499, in dvcignore return DvcIgnoreFilter(self.root_dir) File "/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/ignore.py", line 67, in __init__ for root, dirs, _ in os.walk(root_dir): File "/home/ec2-user/app/proc/.env/lib64/python3.7/os.py", line 410, in walk yield from walk(new_path, topdown, onerror, followlinks) File "/home/ec2-user/app/proc/.env/lib64/python3.7/os.py", line 368, in walk is_dir = entry.is_dir() ``` Which makes me feel like the directory is not being ignored. ***Additonal*** I've unmounted the NFS directory and ran `dvc status` with no problem so I believe the issue stems from dvc trying to traverse it. 
System Information: ``` DVC version: 0.66.6 Python version: 3.7.4 Platform: Linux 4.14.109-99.92.amzn2.x86_64 Installation: pip ``` </issue> <code> [start of dvc/ignore.py] 1 from __future__ import unicode_literals 2 3 import logging 4 import os 5 6 from pathspec import PathSpec 7 from pathspec.patterns import GitWildMatchPattern 8 9 from dvc.utils import relpath 10 from dvc.utils.compat import open 11 12 logger = logging.getLogger(__name__) 13 14 15 class DvcIgnore(object): 16 DVCIGNORE_FILE = ".dvcignore" 17 18 def __call__(self, root, dirs, files): 19 raise NotImplementedError 20 21 22 class DvcIgnorePatterns(DvcIgnore): 23 def __init__(self, ignore_file_path): 24 assert os.path.isabs(ignore_file_path) 25 26 self.ignore_file_path = ignore_file_path 27 self.dirname = os.path.normpath(os.path.dirname(ignore_file_path)) 28 29 with open(ignore_file_path, encoding="utf-8") as fobj: 30 self.ignore_spec = PathSpec.from_lines(GitWildMatchPattern, fobj) 31 32 def __call__(self, root, dirs, files): 33 files = [f for f in files if not self.matches(root, f)] 34 dirs = [d for d in dirs if not self.matches(root, d)] 35 36 return dirs, files 37 38 def matches(self, dirname, basename): 39 abs_path = os.path.join(dirname, basename) 40 rel_path = relpath(abs_path, self.dirname) 41 42 if os.pardir + os.sep in rel_path: 43 return False 44 return self.ignore_spec.match_file(rel_path) 45 46 def __hash__(self): 47 return hash(self.ignore_file_path) 48 49 def __eq__(self, other): 50 return self.ignore_file_path == other.ignore_file_path 51 52 53 class DvcIgnoreDirs(DvcIgnore): 54 def __init__(self, basenames): 55 self.basenames = set(basenames) 56 57 def __call__(self, root, dirs, files): 58 dirs = [d for d in dirs if d not in self.basenames] 59 60 return dirs, files 61 62 63 class DvcIgnoreFilter(object): 64 def __init__(self, root_dir): 65 self.ignores = {DvcIgnoreDirs([".git", ".hg", ".dvc"])} 66 self._update(root_dir) 67 for root, dirs, _ in os.walk(root_dir): 68 for d in dirs: 69 self._update(os.path.join(root, d)) 70 71 def _update(self, dirname): 72 ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE) 73 if os.path.exists(ignore_file_path): 74 self.ignores.add(DvcIgnorePatterns(ignore_file_path)) 75 76 def __call__(self, root, dirs, files): 77 for ignore in self.ignores: 78 dirs, files = ignore(root, dirs, files) 79 80 return dirs, files 81 [end of dvc/ignore.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/ignore.py b/dvc/ignore.py --- a/dvc/ignore.py +++ b/dvc/ignore.py @@ -6,6 +6,7 @@ from pathspec import PathSpec from pathspec.patterns import GitWildMatchPattern +from dvc.utils import dvc_walk from dvc.utils import relpath from dvc.utils.compat import open @@ -47,6 +48,9 @@ return hash(self.ignore_file_path) def __eq__(self, other): + if not isinstance(other, DvcIgnorePatterns): + return NotImplemented + return self.ignore_file_path == other.ignore_file_path @@ -59,12 +63,21 @@ return dirs, files + def __hash__(self): + return hash(tuple(self.basenames)) + + def __eq__(self, other): + if not isinstance(other, DvcIgnoreDirs): + return NotImplemented + + return self.basenames == other.basenames + class DvcIgnoreFilter(object): def __init__(self, root_dir): self.ignores = {DvcIgnoreDirs([".git", ".hg", ".dvc"])} self._update(root_dir) - for root, dirs, _ in os.walk(root_dir): + for root, dirs, _ in dvc_walk(root_dir, self): for d in dirs: self._update(os.path.join(root, d))
{"golden_diff": "diff --git a/dvc/ignore.py b/dvc/ignore.py\n--- a/dvc/ignore.py\n+++ b/dvc/ignore.py\n@@ -6,6 +6,7 @@\n from pathspec import PathSpec\n from pathspec.patterns import GitWildMatchPattern\n \n+from dvc.utils import dvc_walk\n from dvc.utils import relpath\n from dvc.utils.compat import open\n \n@@ -47,6 +48,9 @@\n return hash(self.ignore_file_path)\n \n def __eq__(self, other):\n+ if not isinstance(other, DvcIgnorePatterns):\n+ return NotImplemented\n+\n return self.ignore_file_path == other.ignore_file_path\n \n \n@@ -59,12 +63,21 @@\n \n return dirs, files\n \n+ def __hash__(self):\n+ return hash(tuple(self.basenames))\n+\n+ def __eq__(self, other):\n+ if not isinstance(other, DvcIgnoreDirs):\n+ return NotImplemented\n+\n+ return self.basenames == other.basenames\n+\n \n class DvcIgnoreFilter(object):\n def __init__(self, root_dir):\n self.ignores = {DvcIgnoreDirs([\".git\", \".hg\", \".dvc\"])}\n self._update(root_dir)\n- for root, dirs, _ in os.walk(root_dir):\n+ for root, dirs, _ in dvc_walk(root_dir, self):\n for d in dirs:\n self._update(os.path.join(root, d))\n", "issue": "dvc: .dvcignore trouble with nfs mounted directory\nI have a large NFS mounted in a directory that I would like dvc to ignore. \r\n\r\nDirectory Structure:\r\n```\r\ndirectory\r\n|___nfs\r\n|___...\r\n|___.dvc\r\n|___.dvcignore\r\n```\r\nMy *.dvcignore* has the following line:\r\n`/nfs/` (I've tried `nfs/` and `nfs/*`)\r\n\r\nThe problem is that when I run `dvc status` or `dvc pull` the processes will just hang:\r\n```\r\nDEBUG: PRAGMA user_version;\r\nDEBUG: fetched: [(3,)]\r\nDEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)\r\nDEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)\r\nDEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)\r\nDEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)\r\nDEBUG: PRAGMA user_version = 3; \r\n```\r\n\r\nHere is the traceback from `KeyboardInterrupt`:\r\n```\r\n File \"/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/repo/__init__.py\", line 499, in dvcignore\r\n return DvcIgnoreFilter(self.root_dir)\r\n File \"/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/ignore.py\", line 67, in __init__\r\n for root, dirs, _ in os.walk(root_dir):\r\n File \"/home/ec2-user/app/proc/.env/lib64/python3.7/os.py\", line 410, in walk\r\n yield from walk(new_path, topdown, onerror, followlinks)\r\n File \"/home/ec2-user/app/proc/.env/lib64/python3.7/os.py\", line 368, in walk\r\n is_dir = entry.is_dir() \r\n```\r\nWhich makes me feel like the directory is not being ignored.\r\n\r\n***Additonal***\r\nI've unmounted the NFS directory and ran `dvc status` with no problem so I believe the issue stems from dvc trying to traverse it.\r\n\r\nSystem Information:\r\n``` \r\nDVC version: 0.66.6\r\nPython version: 3.7.4\r\nPlatform: Linux 4.14.109-99.92.amzn2.x86_64\r\nInstallation: pip\r\n```\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom pathspec import PathSpec\nfrom pathspec.patterns import GitWildMatchPattern\n\nfrom dvc.utils import relpath\nfrom dvc.utils.compat import open\n\nlogger = logging.getLogger(__name__)\n\n\nclass DvcIgnore(object):\n DVCIGNORE_FILE = \".dvcignore\"\n\n def __call__(self, root, dirs, files):\n raise NotImplementedError\n\n\nclass 
DvcIgnorePatterns(DvcIgnore):\n def __init__(self, ignore_file_path):\n assert os.path.isabs(ignore_file_path)\n\n self.ignore_file_path = ignore_file_path\n self.dirname = os.path.normpath(os.path.dirname(ignore_file_path))\n\n with open(ignore_file_path, encoding=\"utf-8\") as fobj:\n self.ignore_spec = PathSpec.from_lines(GitWildMatchPattern, fobj)\n\n def __call__(self, root, dirs, files):\n files = [f for f in files if not self.matches(root, f)]\n dirs = [d for d in dirs if not self.matches(root, d)]\n\n return dirs, files\n\n def matches(self, dirname, basename):\n abs_path = os.path.join(dirname, basename)\n rel_path = relpath(abs_path, self.dirname)\n\n if os.pardir + os.sep in rel_path:\n return False\n return self.ignore_spec.match_file(rel_path)\n\n def __hash__(self):\n return hash(self.ignore_file_path)\n\n def __eq__(self, other):\n return self.ignore_file_path == other.ignore_file_path\n\n\nclass DvcIgnoreDirs(DvcIgnore):\n def __init__(self, basenames):\n self.basenames = set(basenames)\n\n def __call__(self, root, dirs, files):\n dirs = [d for d in dirs if d not in self.basenames]\n\n return dirs, files\n\n\nclass DvcIgnoreFilter(object):\n def __init__(self, root_dir):\n self.ignores = {DvcIgnoreDirs([\".git\", \".hg\", \".dvc\"])}\n self._update(root_dir)\n for root, dirs, _ in os.walk(root_dir):\n for d in dirs:\n self._update(os.path.join(root, d))\n\n def _update(self, dirname):\n ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE)\n if os.path.exists(ignore_file_path):\n self.ignores.add(DvcIgnorePatterns(ignore_file_path))\n\n def __call__(self, root, dirs, files):\n for ignore in self.ignores:\n dirs, files = ignore(root, dirs, files)\n\n return dirs, files\n", "path": "dvc/ignore.py"}]}
num_tokens_prompt: 1,832
num_tokens_diff: 326
problem_id: gh_patches_debug_16139
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-cloudformation__cfn-lint-1922
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E1017 with nested !Select *cfn-lint version: (`cfn-lint --version`)* 0.46.0 *Description of issue.* When linting this CFT YAML: ```yaml subnet0A: DependsOn: ipv6CidrBlock Type: 'AWS::EC2::Subnet' Properties: VpcId: !Ref vpc CidrBlock: !Select - !Select - 1 - !Split - ',' - !FindInMap - subnetMap - !Ref numAzs - !Ref numSubnets - 'Fn::Cidr': - !GetAtt - vpc - CidrBlock - !Select - 0 - !Split - ',' - !FindInMap - subnetMap - !Ref numAzs - !Ref numSubnets - !FindInMap - subnetMap - maskTocidrBits - !Ref subnetMask ``` I get this error: E1017 Select index should be an Integer or a function Ref or FindInMap for Resources/subnet0A/Properties/CidrBlock/Fn::Select Template works fine. thanks Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`) </issue> <code> [start of src/cfnlint/rules/functions/Select.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import six 6 from cfnlint.rules import CloudFormationLintRule 7 from cfnlint.rules import RuleMatch 8 9 10 class Select(CloudFormationLintRule): 11 """Check if Select values are correct""" 12 id = 'E1017' 13 shortdesc = 'Select validation of parameters' 14 description = 'Making sure the function not is of list' 15 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html' 16 tags = ['functions', 'select'] 17 18 def match(self, cfn): 19 matches = [] 20 21 select_objs = cfn.search_deep_keys('Fn::Select') 22 23 supported_functions = [ 24 'Fn::FindInMap', 25 'Fn::GetAtt', 26 'Fn::GetAZs', 27 'Fn::If', 28 'Fn::Split', 29 'Fn::Cidr', 30 'Ref' 31 ] 32 33 for select_obj in select_objs: 34 select_value_obj = select_obj[-1] 35 tree = select_obj[:-1] 36 if isinstance(select_value_obj, list): 37 if len(select_value_obj) == 2: 38 index_obj = select_value_obj[0] 39 list_of_objs = select_value_obj[1] 40 if isinstance(index_obj, dict): 41 if len(index_obj) == 1: 42 for index_key, _ in index_obj.items(): 43 if index_key not in ['Ref', 'Fn::FindInMap']: 44 message = 'Select index should be an Integer or a function Ref or FindInMap for {0}' 45 matches.append(RuleMatch( 46 tree, message.format('/'.join(map(str, tree))))) 47 elif not isinstance(index_obj, six.integer_types): 48 try: 49 int(index_obj) 50 except ValueError: 51 message = 'Select index should be an Integer or a function of Ref or FindInMap for {0}' 52 matches.append(RuleMatch( 53 tree, message.format('/'.join(map(str, tree))))) 54 if isinstance(list_of_objs, dict): 55 if len(list_of_objs) == 1: 56 for key, _ in list_of_objs.items(): 57 if key not in supported_functions: 58 message = 'Select should use a supported function of {0}' 59 matches.append(RuleMatch( 60 tree, message.format(', '.join(map(str, supported_functions))))) 61 else: 62 message = 'Select should use a supported function of {0}' 63 matches.append(RuleMatch( 64 tree, message.format(', '.join(map(str, supported_functions))))) 65 elif not isinstance(list_of_objs, list): 66 message = 'Select should be an array of values for {0}' 67 matches.append(RuleMatch( 68 tree, message.format('/'.join(map(str, tree))))) 69 else: 70 
message = 'Select should be a list of 2 elements for {0}' 71 matches.append(RuleMatch( 72 tree, message.format('/'.join(map(str, tree))))) 73 else: 74 message = 'Select should be a list of 2 elements for {0}' 75 matches.append(RuleMatch( 76 tree, message.format('/'.join(map(str, tree))))) 77 return matches 78 [end of src/cfnlint/rules/functions/Select.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/functions/Select.py b/src/cfnlint/rules/functions/Select.py --- a/src/cfnlint/rules/functions/Select.py +++ b/src/cfnlint/rules/functions/Select.py @@ -40,7 +40,7 @@ if isinstance(index_obj, dict): if len(index_obj) == 1: for index_key, _ in index_obj.items(): - if index_key not in ['Ref', 'Fn::FindInMap']: + if index_key not in ['Ref', 'Fn::FindInMap', 'Fn::Select']: message = 'Select index should be an Integer or a function Ref or FindInMap for {0}' matches.append(RuleMatch( tree, message.format('/'.join(map(str, tree)))))
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/Select.py b/src/cfnlint/rules/functions/Select.py\n--- a/src/cfnlint/rules/functions/Select.py\n+++ b/src/cfnlint/rules/functions/Select.py\n@@ -40,7 +40,7 @@\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n- if index_key not in ['Ref', 'Fn::FindInMap']:\n+ if index_key not in ['Ref', 'Fn::FindInMap', 'Fn::Select']:\n message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n", "issue": "E1017 with nested !Select\n*cfn-lint version: (`cfn-lint --version`)*\r\n0.46.0\r\n\r\n*Description of issue.*\r\nWhen linting this CFT YAML:\r\n```yaml\r\nsubnet0A:\r\n DependsOn: ipv6CidrBlock\r\n Type: 'AWS::EC2::Subnet'\r\n Properties:\r\n VpcId: !Ref vpc\r\n CidrBlock: !Select\r\n - !Select\r\n - 1\r\n - !Split\r\n - ','\r\n - !FindInMap\r\n - subnetMap\r\n - !Ref numAzs\r\n - !Ref numSubnets\r\n - 'Fn::Cidr':\r\n - !GetAtt\r\n - vpc\r\n - CidrBlock\r\n - !Select\r\n - 0\r\n - !Split\r\n - ','\r\n - !FindInMap\r\n - subnetMap\r\n - !Ref numAzs\r\n - !Ref numSubnets\r\n - !FindInMap\r\n - subnetMap\r\n - maskTocidrBits\r\n - !Ref subnetMask\r\n```\r\n\r\nI get this error:\r\n\r\nE1017 Select index should be an Integer or a function Ref or FindInMap for Resources/subnet0A/Properties/CidrBlock/Fn::Select\r\n\r\nTemplate works fine.\r\nthanks\r\n\r\nCfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Select(CloudFormationLintRule):\n \"\"\"Check if Select values are correct\"\"\"\n id = 'E1017'\n shortdesc = 'Select validation of parameters'\n description = 'Making sure the function not is of list'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html'\n tags = ['functions', 'select']\n\n def match(self, cfn):\n matches = []\n\n select_objs = cfn.search_deep_keys('Fn::Select')\n\n supported_functions = [\n 'Fn::FindInMap',\n 'Fn::GetAtt',\n 'Fn::GetAZs',\n 'Fn::If',\n 'Fn::Split',\n 'Fn::Cidr',\n 'Ref'\n ]\n\n for select_obj in select_objs:\n select_value_obj = select_obj[-1]\n tree = select_obj[:-1]\n if isinstance(select_value_obj, list):\n if len(select_value_obj) == 2:\n index_obj = select_value_obj[0]\n list_of_objs = select_value_obj[1]\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n if index_key not in ['Ref', 'Fn::FindInMap']:\n message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n elif not isinstance(index_obj, six.integer_types):\n try:\n int(index_obj)\n except ValueError:\n message = 'Select index should be an Integer or a function of Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n if isinstance(list_of_objs, dict):\n if len(list_of_objs) == 1:\n for key, _ in list_of_objs.items():\n if key not in supported_functions:\n message = 'Select should use a supported function of {0}'\n matches.append(RuleMatch(\n tree, message.format(', '.join(map(str, supported_functions)))))\n else:\n message = 'Select should use a supported function of {0}'\n matches.append(RuleMatch(\n tree, message.format(', '.join(map(str, supported_functions)))))\n elif not isinstance(list_of_objs, list):\n message = 'Select should be an array of values for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Select should be a list of 2 elements for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Select should be a list of 2 elements for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n return matches\n", "path": "src/cfnlint/rules/functions/Select.py"}]}
num_tokens_prompt: 1,745
num_tokens_diff: 171
problem_id: gh_patches_debug_3665
source: rasdani/github-patches
task_type: git_diff
in_source_id: pex-tool__pex-634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bootstrap demotion is not robust While working https://github.com/pantsbuild/pants/issues/6927 it was discovered that there were multiple `.bootstrap/` on the sys.path and only one was demoted, leading to pants, when running in a pex, picking .bootstrap.pex instead of the pex dist it in `.deps/`. In this case, Pants was purposefully duplicating pex path entries to work around a bug in the `coverage` library. PEX should be robust and demote all instances of it's `.bootstrap/` entry on the sys.path. </issue> <code> [start of pex/bootstrap.py] 1 # coding=utf-8 2 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). 3 # Licensed under the Apache License, Version 2.0 (see LICENSE). 4 5 import os 6 7 8 class Bootstrap(object): 9 """Supports introspection of the PEX bootstrap code.""" 10 11 _INSTANCE = None 12 13 @classmethod 14 def locate(cls): 15 """Locates the active PEX bootstrap. 16 17 :rtype: :class:`Bootstrap` 18 """ 19 if cls._INSTANCE is None: 20 bootstrap_path = __file__ 21 module_import_path = __name__.split('.') 22 23 # For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import 24 # path pex.bootstrap; so we walk back through all the module components of our import path to 25 # find the base sys.path entry where we were found (requests.pex/.bootstrap in this example). 26 for _ in module_import_path: 27 bootstrap_path = os.path.dirname(bootstrap_path) 28 29 cls._INSTANCE = cls(sys_path_entry=bootstrap_path) 30 return cls._INSTANCE 31 32 def __init__(self, sys_path_entry): 33 self._sys_path_entry = sys_path_entry 34 self._realpath = os.path.realpath(self._sys_path_entry) 35 36 def demote(self): 37 """Demote the bootstrap code to the end of the `sys.path` so it is found last. 38 39 :return: The list of un-imported bootstrap modules. 40 :rtype: list of :class:`types.ModuleType` 41 """ 42 import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process. 43 44 unimported_modules = [] 45 for name, module in reversed(sorted(sys.modules.items())): 46 if self.imported_from_bootstrap(module): 47 unimported_modules.append(sys.modules.pop(name)) 48 49 sys.path.remove(self._sys_path_entry) 50 sys.path.append(self._sys_path_entry) 51 52 return unimported_modules 53 54 def imported_from_bootstrap(self, module): 55 """Return ``True`` if the given ``module`` object was imported from bootstrap code. 56 57 :param module: The module to check the provenance of. 58 :type module: :class:`types.ModuleType` 59 :rtype: bool 60 """ 61 62 # A vendored module. 63 path = getattr(module, '__file__', None) 64 if path and os.path.realpath(path).startswith(self._realpath): 65 return True 66 67 # A vendored package. 68 path = getattr(module, '__path__', None) 69 if path and any(os.path.realpath(path_item).startswith(self._realpath) 70 for path_item in path): 71 return True 72 73 return False 74 75 def __repr__(self): 76 return '{cls}(sys_path_entry={sys_path_entry!r})'.format(cls=type(self).__name__, 77 sys_path_entry=self._sys_path_entry) 78 [end of pex/bootstrap.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/bootstrap.py b/pex/bootstrap.py --- a/pex/bootstrap.py +++ b/pex/bootstrap.py @@ -46,7 +46,7 @@ if self.imported_from_bootstrap(module): unimported_modules.append(sys.modules.pop(name)) - sys.path.remove(self._sys_path_entry) + sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath] sys.path.append(self._sys_path_entry) return unimported_modules
{"golden_diff": "diff --git a/pex/bootstrap.py b/pex/bootstrap.py\n--- a/pex/bootstrap.py\n+++ b/pex/bootstrap.py\n@@ -46,7 +46,7 @@\n if self.imported_from_bootstrap(module):\n unimported_modules.append(sys.modules.pop(name))\n \n- sys.path.remove(self._sys_path_entry)\n+ sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath]\n sys.path.append(self._sys_path_entry)\n \n return unimported_modules\n", "issue": "Bootstrap demotion is not robust\nWhile working https://github.com/pantsbuild/pants/issues/6927 it was discovered that there were multiple `.bootstrap/` on the sys.path and only one was demoted, leading to pants, when running in a pex, picking .bootstrap.pex instead of the pex dist it in `.deps/`. In this case, Pants was purposefully duplicating pex path entries to work around a bug in the `coverage` library. PEX should be robust and demote all instances of it's `.bootstrap/` entry on the sys.path.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\n\nclass Bootstrap(object):\n \"\"\"Supports introspection of the PEX bootstrap code.\"\"\"\n\n _INSTANCE = None\n\n @classmethod\n def locate(cls):\n \"\"\"Locates the active PEX bootstrap.\n\n :rtype: :class:`Bootstrap`\n \"\"\"\n if cls._INSTANCE is None:\n bootstrap_path = __file__\n module_import_path = __name__.split('.')\n\n # For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import\n # path pex.bootstrap; so we walk back through all the module components of our import path to\n # find the base sys.path entry where we were found (requests.pex/.bootstrap in this example).\n for _ in module_import_path:\n bootstrap_path = os.path.dirname(bootstrap_path)\n\n cls._INSTANCE = cls(sys_path_entry=bootstrap_path)\n return cls._INSTANCE\n\n def __init__(self, sys_path_entry):\n self._sys_path_entry = sys_path_entry\n self._realpath = os.path.realpath(self._sys_path_entry)\n\n def demote(self):\n \"\"\"Demote the bootstrap code to the end of the `sys.path` so it is found last.\n\n :return: The list of un-imported bootstrap modules.\n :rtype: list of :class:`types.ModuleType`\n \"\"\"\n import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process.\n\n unimported_modules = []\n for name, module in reversed(sorted(sys.modules.items())):\n if self.imported_from_bootstrap(module):\n unimported_modules.append(sys.modules.pop(name))\n\n sys.path.remove(self._sys_path_entry)\n sys.path.append(self._sys_path_entry)\n\n return unimported_modules\n\n def imported_from_bootstrap(self, module):\n \"\"\"Return ``True`` if the given ``module`` object was imported from bootstrap code.\n\n :param module: The module to check the provenance of.\n :type module: :class:`types.ModuleType`\n :rtype: bool\n \"\"\"\n\n # A vendored module.\n path = getattr(module, '__file__', None)\n if path and os.path.realpath(path).startswith(self._realpath):\n return True\n\n # A vendored package.\n path = getattr(module, '__path__', None)\n if path and any(os.path.realpath(path_item).startswith(self._realpath)\n for path_item in path):\n return True\n\n return False\n\n def __repr__(self):\n return '{cls}(sys_path_entry={sys_path_entry!r})'.format(cls=type(self).__name__,\n sys_path_entry=self._sys_path_entry)\n", "path": "pex/bootstrap.py"}]}
num_tokens_prompt: 1,425
num_tokens_diff: 116
problem_id: gh_patches_debug_32954
source: rasdani/github-patches
task_type: git_diff
in_source_id: liqd__a4-meinberlin-2442
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> in text commenting links to first chapter lead back to the project view ...so I am back to the timeline and the tiles There are two links doing that: "back to chapter" in paragraph detail view and "previous chapter" in second chapter. </issue> <code> [start of meinberlin/apps/documents/views.py] 1 from django.http import Http404 2 from django.urls import reverse 3 from django.utils.translation import ugettext_lazy as _ 4 from django.views import generic 5 6 from adhocracy4.dashboard import mixins as dashboard_mixins 7 from adhocracy4.projects.mixins import DisplayProjectOrModuleMixin 8 from adhocracy4.projects.mixins import ProjectMixin 9 from adhocracy4.rules import mixins as rules_mixins 10 from meinberlin.apps.exports.views import DashboardExportView 11 12 from . import models 13 14 15 class DocumentDashboardView(ProjectMixin, 16 dashboard_mixins.DashboardBaseMixin, 17 dashboard_mixins.DashboardComponentMixin, 18 generic.TemplateView): 19 template_name = 'meinberlin_documents/document_dashboard.html' 20 permission_required = 'a4projects.change_project' 21 22 def get_permission_object(self): 23 return self.project 24 25 26 class ChapterDetailView(ProjectMixin, 27 rules_mixins.PermissionRequiredMixin, 28 generic.DetailView, 29 DisplayProjectOrModuleMixin): 30 model = models.Chapter 31 permission_required = 'meinberlin_documents.view_chapter' 32 get_context_from_object = True 33 34 def get_context_data(self, **kwargs): 35 context = super(ChapterDetailView, self).get_context_data(**kwargs) 36 context['chapter_list'] = self.chapter_list 37 return context 38 39 @property 40 def chapter_list(self): 41 return models.Chapter.objects.filter(module=self.module) 42 43 44 class DocumentDetailView(ChapterDetailView): 45 get_context_from_object = False 46 47 def get_object(self): 48 first_chapter = models.Chapter.objects \ 49 .filter(module=self.module) \ 50 .first() 51 52 if not first_chapter: 53 raise Http404(_('Document has no chapters defined.')) 54 return first_chapter 55 56 57 class ParagraphDetailView(ProjectMixin, 58 rules_mixins.PermissionRequiredMixin, 59 generic.DetailView): 60 model = models.Paragraph 61 permission_required = 'meinberlin_documents.view_paragraph' 62 63 64 class DocumentDashboardExportView(DashboardExportView): 65 template_name = 'meinberlin_exports/export_dashboard.html' 66 67 def get_context_data(self, **kwargs): 68 context = super().get_context_data(**kwargs) 69 context['comment_export'] = reverse( 70 'a4dashboard:document-comment-export', 71 kwargs={'module_slug': self.module.slug}) 72 return context 73 [end of meinberlin/apps/documents/views.py] [start of meinberlin/apps/documents/models.py] 1 from ckeditor_uploader.fields import RichTextUploadingField 2 from django.contrib.contenttypes.fields import GenericRelation 3 from django.db import models 4 from django.urls import reverse 5 from django.utils.functional import cached_property 6 7 from adhocracy4 import transforms 8 from adhocracy4.comments import models as comment_models 9 from adhocracy4.models import base 10 from adhocracy4.modules import models as module_models 11 12 13 class Chapter(module_models.Item): 14 name = models.CharField(max_length=120) 15 comments = GenericRelation(comment_models.Comment, 16 related_query_name='chapter', 17 object_id_field='object_pk') 18 weight = models.PositiveIntegerField(default=0) 19 20 class Meta: 21 ordering = ('weight',) 22 23 def __str__(self): 24 return 
"{}_chapter_{}".format(str(self.module), self.pk) 25 26 def get_absolute_url(self): 27 return reverse('meinberlin_documents:chapter-detail', 28 args=[str(self.pk)]) 29 30 @cached_property 31 def prev(self): 32 return Chapter.objects\ 33 .filter(module=self.module)\ 34 .filter(weight__lt=self.weight)\ 35 .order_by('-weight')\ 36 .first() 37 38 @cached_property 39 def next(self): 40 return Chapter.objects\ 41 .filter(module=self.module)\ 42 .filter(weight__gt=self.weight)\ 43 .order_by('weight')\ 44 .first() 45 46 47 class Paragraph(base.TimeStampedModel): 48 name = models.CharField(max_length=120, blank=True) 49 text = RichTextUploadingField(config_name='image-editor') 50 weight = models.PositiveIntegerField() 51 chapter = models.ForeignKey(Chapter, 52 on_delete=models.CASCADE, 53 related_name='paragraphs') 54 comments = GenericRelation(comment_models.Comment, 55 related_query_name='paragraph', 56 object_id_field='object_pk') 57 58 class Meta: 59 ordering = ('weight',) 60 61 def __str__(self): 62 return "{}_paragraph_{}".format(str(self.chapter), self.weight) 63 64 def save(self, *args, **kwargs): 65 self.text = transforms.clean_html_field( 66 self.text, 'image-editor') 67 super().save(*args, **kwargs) 68 69 def get_absolute_url(self): 70 return reverse('meinberlin_documents:paragraph-detail', 71 args=[str(self.pk)]) 72 73 @property 74 def creator(self): 75 return self.chapter.creator 76 77 @property 78 def project(self): 79 return self.module.project 80 81 @property 82 def module(self): 83 return self.chapter.module 84 [end of meinberlin/apps/documents/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/documents/models.py b/meinberlin/apps/documents/models.py --- a/meinberlin/apps/documents/models.py +++ b/meinberlin/apps/documents/models.py @@ -24,6 +24,12 @@ return "{}_chapter_{}".format(str(self.module), self.pk) def get_absolute_url(self): + if (self.project.display_timeline + and not self.module.is_in_module_cluster): + return ('{}?initialSlide={}' + .format(reverse('meinberlin_documents:chapter-detail', + args=[str(self.pk)]), + self.module.get_timeline_index)) return reverse('meinberlin_documents:chapter-detail', args=[str(self.pk)]) diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py --- a/meinberlin/apps/documents/views.py +++ b/meinberlin/apps/documents/views.py @@ -1,5 +1,6 @@ from django.http import Http404 from django.urls import reverse +from django.utils.functional import cached_property from django.utils.translation import ugettext_lazy as _ from django.views import generic @@ -32,7 +33,7 @@ get_context_from_object = True def get_context_data(self, **kwargs): - context = super(ChapterDetailView, self).get_context_data(**kwargs) + context = super().get_context_data(**kwargs) context['chapter_list'] = self.chapter_list return context @@ -40,6 +41,15 @@ def chapter_list(self): return models.Chapter.objects.filter(module=self.module) + @cached_property + def extends(self): + if self.url_name == 'module-detail': + return 'a4modules/module_detail.html' + if self.url_name == 'chapter-detail': + if self.module.is_in_module_cluster: + return 'a4modules/module_detail.html' + return 'a4projects/project_detail.html' + class DocumentDetailView(ChapterDetailView): get_context_from_object = False
{"golden_diff": "diff --git a/meinberlin/apps/documents/models.py b/meinberlin/apps/documents/models.py\n--- a/meinberlin/apps/documents/models.py\n+++ b/meinberlin/apps/documents/models.py\n@@ -24,6 +24,12 @@\n return \"{}_chapter_{}\".format(str(self.module), self.pk)\n \n def get_absolute_url(self):\n+ if (self.project.display_timeline\n+ and not self.module.is_in_module_cluster):\n+ return ('{}?initialSlide={}'\n+ .format(reverse('meinberlin_documents:chapter-detail',\n+ args=[str(self.pk)]),\n+ self.module.get_timeline_index))\n return reverse('meinberlin_documents:chapter-detail',\n args=[str(self.pk)])\n \ndiff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py\n--- a/meinberlin/apps/documents/views.py\n+++ b/meinberlin/apps/documents/views.py\n@@ -1,5 +1,6 @@\n from django.http import Http404\n from django.urls import reverse\n+from django.utils.functional import cached_property\n from django.utils.translation import ugettext_lazy as _\n from django.views import generic\n \n@@ -32,7 +33,7 @@\n get_context_from_object = True\n \n def get_context_data(self, **kwargs):\n- context = super(ChapterDetailView, self).get_context_data(**kwargs)\n+ context = super().get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n \n@@ -40,6 +41,15 @@\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n \n+ @cached_property\n+ def extends(self):\n+ if self.url_name == 'module-detail':\n+ return 'a4modules/module_detail.html'\n+ if self.url_name == 'chapter-detail':\n+ if self.module.is_in_module_cluster:\n+ return 'a4modules/module_detail.html'\n+ return 'a4projects/project_detail.html'\n+\n \n class DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n", "issue": "in text commenting links to first chapter lead back to the project view\n...so I am back to the timeline and the tiles\r\n\r\nThere are two links doing that: \"back to chapter\" in paragraph detail view and \"previous chapter\" in second chapter.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . 
import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}, {"content": "from ckeditor_uploader.fields import RichTextUploadingField\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models import base\nfrom adhocracy4.modules import models as module_models\n\n\nclass Chapter(module_models.Item):\n name = models.CharField(max_length=120)\n comments = GenericRelation(comment_models.Comment,\n related_query_name='chapter',\n object_id_field='object_pk')\n weight = models.PositiveIntegerField(default=0)\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return \"{}_chapter_{}\".format(str(self.module), self.pk)\n\n def get_absolute_url(self):\n return reverse('meinberlin_documents:chapter-detail',\n args=[str(self.pk)])\n\n @cached_property\n def prev(self):\n return Chapter.objects\\\n .filter(module=self.module)\\\n .filter(weight__lt=self.weight)\\\n .order_by('-weight')\\\n .first()\n\n @cached_property\n def next(self):\n return Chapter.objects\\\n .filter(module=self.module)\\\n .filter(weight__gt=self.weight)\\\n .order_by('weight')\\\n .first()\n\n\nclass Paragraph(base.TimeStampedModel):\n name = models.CharField(max_length=120, blank=True)\n text = RichTextUploadingField(config_name='image-editor')\n weight = models.PositiveIntegerField()\n chapter = models.ForeignKey(Chapter,\n on_delete=models.CASCADE,\n related_name='paragraphs')\n comments = GenericRelation(comment_models.Comment,\n related_query_name='paragraph',\n object_id_field='object_pk')\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return 
\"{}_paragraph_{}\".format(str(self.chapter), self.weight)\n\n def save(self, *args, **kwargs):\n self.text = transforms.clean_html_field(\n self.text, 'image-editor')\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse('meinberlin_documents:paragraph-detail',\n args=[str(self.pk)])\n\n @property\n def creator(self):\n return self.chapter.creator\n\n @property\n def project(self):\n return self.module.project\n\n @property\n def module(self):\n return self.chapter.module\n", "path": "meinberlin/apps/documents/models.py"}]}
1,924
457
gh_patches_debug_32403
rasdani/github-patches
git_diff
microsoft__hi-ml-504
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Histo dataset mounting script does not show files permanently Files are only visible while the script is running, but disappear once terminated. This could be a consequence of the library updates in #455. </issue> <code> [start of hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py] 1 # ------------------------------------------------------------------------------------------ 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. 4 # ------------------------------------------------------------------------------------------ 5 from pathlib import Path 6 import sys 7 8 himl_histo_root_dir = Path(__file__).parent.parent.parent 9 himl_root = himl_histo_root_dir.parent.parent 10 himl_azure_package_root = himl_root / "hi-ml-azure" / "src" 11 sys.path.insert(0, str(himl_azure_package_root)) 12 13 from health_azure import DatasetConfig # noqa: E402 14 from health_azure.utils import get_workspace # noqa: E402 15 16 17 def mount_dataset(dataset_id: str) -> str: 18 ws = get_workspace() 19 target_folder = "/tmp/datasets/" + dataset_id 20 dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True) 21 dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws) 22 assert mount_ctx is not None # for mypy 23 mount_ctx.start() 24 return str(dataset_mount_folder) 25 26 27 if __name__ == '__main__': 28 import argparse 29 parser = argparse.ArgumentParser() 30 # Run this script as "python mount_azure_dataset.py --dataset_id TCGA-CRCk" 31 parser.add_argument('--dataset_id', type=str, 32 help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk') 33 args = parser.parse_args() 34 mount_dataset(args.dataset_id) 35 [end of hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py --- a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py +++ b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py @@ -4,6 +4,8 @@ # ------------------------------------------------------------------------------------------ from pathlib import Path import sys +import time +from typing import Any himl_histo_root_dir = Path(__file__).parent.parent.parent himl_root = himl_histo_root_dir.parent.parent @@ -14,14 +16,14 @@ from health_azure.utils import get_workspace # noqa: E402 -def mount_dataset(dataset_id: str) -> str: +def mount_dataset(dataset_id: str) -> Any: ws = get_workspace() target_folder = "/tmp/datasets/" + dataset_id dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True) - dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws) + _, mount_ctx = dataset.to_input_dataset_local(ws) assert mount_ctx is not None # for mypy mount_ctx.start() - return str(dataset_mount_folder) + return mount_ctx if __name__ == '__main__': @@ -31,4 +33,9 @@ parser.add_argument('--dataset_id', type=str, help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk') args = parser.parse_args() - mount_dataset(args.dataset_id) + # It is essential that the mount context is returned from the mounting function and referenced here. + # If not, mounting will be stopped, and the files are no longer available. + _ = mount_dataset(args.dataset_id) + print("The mounted dataset will only be available while this script is running. Press Ctrl-C to terminate it.`") + while True: + time.sleep(60)
{"golden_diff": "diff --git a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n--- a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n+++ b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n@@ -4,6 +4,8 @@\n # ------------------------------------------------------------------------------------------\n from pathlib import Path\n import sys\n+import time\n+from typing import Any\n \n himl_histo_root_dir = Path(__file__).parent.parent.parent\n himl_root = himl_histo_root_dir.parent.parent\n@@ -14,14 +16,14 @@\n from health_azure.utils import get_workspace # noqa: E402\n \n \n-def mount_dataset(dataset_id: str) -> str:\n+def mount_dataset(dataset_id: str) -> Any:\n ws = get_workspace()\n target_folder = \"/tmp/datasets/\" + dataset_id\n dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)\n- dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws)\n+ _, mount_ctx = dataset.to_input_dataset_local(ws)\n assert mount_ctx is not None # for mypy\n mount_ctx.start()\n- return str(dataset_mount_folder)\n+ return mount_ctx\n \n \n if __name__ == '__main__':\n@@ -31,4 +33,9 @@\n parser.add_argument('--dataset_id', type=str,\n help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk')\n args = parser.parse_args()\n- mount_dataset(args.dataset_id)\n+ # It is essential that the mount context is returned from the mounting function and referenced here.\n+ # If not, mounting will be stopped, and the files are no longer available.\n+ _ = mount_dataset(args.dataset_id)\n+ print(\"The mounted dataset will only be available while this script is running. Press Ctrl-C to terminate it.`\")\n+ while True:\n+ time.sleep(60)\n", "issue": "Histo dataset mounting script does not show files permanently\nFiles are only visible while the script is running, but disappear once terminated. \r\nThis could be a consequence of the library updates in #455.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom pathlib import Path\nimport sys\n\nhiml_histo_root_dir = Path(__file__).parent.parent.parent\nhiml_root = himl_histo_root_dir.parent.parent\nhiml_azure_package_root = himl_root / \"hi-ml-azure\" / \"src\"\nsys.path.insert(0, str(himl_azure_package_root))\n\nfrom health_azure import DatasetConfig # noqa: E402\nfrom health_azure.utils import get_workspace # noqa: E402\n\n\ndef mount_dataset(dataset_id: str) -> str:\n ws = get_workspace()\n target_folder = \"/tmp/datasets/\" + dataset_id\n dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)\n dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws)\n assert mount_ctx is not None # for mypy\n mount_ctx.start()\n return str(dataset_mount_folder)\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n # Run this script as \"python mount_azure_dataset.py --dataset_id TCGA-CRCk\"\n parser.add_argument('--dataset_id', type=str,\n help='Name of the Azure dataset e.g. 
PANDA or TCGA-CRCk')\n args = parser.parse_args()\n mount_dataset(args.dataset_id)\n", "path": "hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py"}]}
998
464
gh_patches_debug_22820
rasdani/github-patches
git_diff
bokeh__bokeh-8672
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use utf-8 encoding to open notebook file. Currently, `ipynb` file is opened by following code: ``` with open(filename) as f: pass ``` https://github.com/bokeh/bokeh/blob/master/bokeh/application/handlers/notebook.py#L117 I suggest to open the file with `encoding="utf-8"`, otherwise the serve command will raise error on Windows system. </issue> <code> [start of bokeh/application/handlers/notebook.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. 3 # All rights reserved. 4 # 5 # The full license is in the file LICENSE.txt, distributed with this software. 6 #----------------------------------------------------------------------------- 7 ''' Provide a Bokeh Application Handler to build up documents by running 8 the code from Jupyter notebook (``.ipynb``) files. 9 10 This handler is configured with the filename of a Jupyter notebook. When a 11 Bokeh application calls ``modify_doc``, the code from all the notebook cells 12 is collected and executed to process a new Document for a session. When the 13 notebook code is executed, the Document being modified will be available as 14 ``curdoc``, and any optionally provided ``args`` will be available as 15 ``sys.argv``. 16 17 ''' 18 19 #----------------------------------------------------------------------------- 20 # Boilerplate 21 #----------------------------------------------------------------------------- 22 from __future__ import absolute_import, division, print_function, unicode_literals 23 24 import re 25 import sys 26 import logging 27 log = logging.getLogger(__name__) 28 29 #----------------------------------------------------------------------------- 30 # Imports 31 #----------------------------------------------------------------------------- 32 33 # Standard library imports 34 35 # External imports 36 37 # Bokeh imports 38 from ...util.dependencies import import_required 39 from .code import CodeHandler 40 41 #----------------------------------------------------------------------------- 42 # Globals and constants 43 #----------------------------------------------------------------------------- 44 45 __all__ = ( 46 'NotebookHandler', 47 ) 48 49 #----------------------------------------------------------------------------- 50 # General API 51 #----------------------------------------------------------------------------- 52 53 #----------------------------------------------------------------------------- 54 # Dev API 55 #----------------------------------------------------------------------------- 56 57 class NotebookHandler(CodeHandler): 58 ''' A Handler that uses code in a Jupyter notebook for modifying Bokeh 59 Documents. 60 61 ''' 62 63 _logger_text = "%s: call to %s() ignored when running notebooks with the 'bokeh' command." 
64 65 _origin = "Notebook" 66 67 def __init__(self, *args, **kwargs): 68 ''' 69 70 Keywords: 71 filename (str) : a path to a Jupyter notebook (".ipynb") file 72 73 ''' 74 nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.') 75 nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.') 76 77 if 'filename' not in kwargs: 78 raise ValueError('Must pass a filename to NotebookHandler') 79 80 81 class StripMagicsProcessor(nbconvert.preprocessors.Preprocessor): 82 """ 83 Preprocessor to convert notebooks to Python source while stripping 84 out all magics (i.e IPython specific syntax). 85 """ 86 87 _magic_pattern = re.compile(r'^\s*(?P<magic>%%\w\w+)($|(\s+))') 88 89 def strip_magics(self, source): 90 """ 91 Given the source of a cell, filter out all cell and line magics. 92 """ 93 filtered=[] 94 for line in source.splitlines(): 95 match = self._magic_pattern.match(line) 96 if match is None: 97 filtered.append(line) 98 else: 99 msg = 'Stripping out IPython magic {magic} in code cell {cell}' 100 message = msg.format(cell=self._cell_counter, magic=match.group('magic')) 101 log.warning(message) 102 return '\n'.join(filtered) 103 104 def preprocess_cell(self, cell, resources, index): 105 if cell['cell_type'] == 'code': 106 self._cell_counter += 1 107 cell['source'] = self.strip_magics(cell['source']) 108 return cell, resources 109 110 def __call__(self, nb, resources): 111 self._cell_counter = 0 112 return self.preprocess(nb,resources) 113 114 preprocessors=[StripMagicsProcessor()] 115 filename = kwargs['filename'] 116 117 with open(filename) as f: 118 nb = nbformat.read(f, nbformat.NO_CONVERT) 119 exporter = nbconvert.PythonExporter() 120 121 for preprocessor in preprocessors: 122 exporter.register_preprocessor(preprocessor) 123 124 source, _ = exporter.from_notebook_node(nb) 125 source = source.replace('get_ipython().run_line_magic', '') 126 source = source.replace('get_ipython().magic', '') 127 128 if sys.version_info.major == 2 and isinstance(source, unicode): # NOQA 129 source = source.encode('utf-8') 130 kwargs['source'] = source 131 132 super(NotebookHandler, self).__init__(*args, **kwargs) 133 134 #----------------------------------------------------------------------------- 135 # Private API 136 #----------------------------------------------------------------------------- 137 138 #----------------------------------------------------------------------------- 139 # Code 140 #----------------------------------------------------------------------------- 141 [end of bokeh/application/handlers/notebook.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/application/handlers/notebook.py b/bokeh/application/handlers/notebook.py --- a/bokeh/application/handlers/notebook.py +++ b/bokeh/application/handlers/notebook.py @@ -21,8 +21,6 @@ #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals -import re -import sys import logging log = logging.getLogger(__name__) @@ -31,6 +29,9 @@ #----------------------------------------------------------------------------- # Standard library imports +import io +import re +import sys # External imports @@ -114,7 +115,7 @@ preprocessors=[StripMagicsProcessor()] filename = kwargs['filename'] - with open(filename) as f: + with io.open(filename, encoding="utf-8") as f: nb = nbformat.read(f, nbformat.NO_CONVERT) exporter = nbconvert.PythonExporter()
{"golden_diff": "diff --git a/bokeh/application/handlers/notebook.py b/bokeh/application/handlers/notebook.py\n--- a/bokeh/application/handlers/notebook.py\n+++ b/bokeh/application/handlers/notebook.py\n@@ -21,8 +21,6 @@\n #-----------------------------------------------------------------------------\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-import re\n-import sys\n import logging\n log = logging.getLogger(__name__)\n \n@@ -31,6 +29,9 @@\n #-----------------------------------------------------------------------------\n \n # Standard library imports\n+import io\n+import re\n+import sys\n \n # External imports\n \n@@ -114,7 +115,7 @@\n preprocessors=[StripMagicsProcessor()]\n filename = kwargs['filename']\n \n- with open(filename) as f:\n+ with io.open(filename, encoding=\"utf-8\") as f:\n nb = nbformat.read(f, nbformat.NO_CONVERT)\n exporter = nbconvert.PythonExporter()\n", "issue": "Use utf-8 encoding to open notebook file.\nCurrently, `ipynb` file is opened by following code:\r\n\r\n```\r\n with open(filename) as f:\r\n pass\r\n```\r\n\r\nhttps://github.com/bokeh/bokeh/blob/master/bokeh/application/handlers/notebook.py#L117\r\n\r\nI suggest to open the file with `encoding=\"utf-8\"`, otherwise the serve command will raise error on Windows system.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Provide a Bokeh Application Handler to build up documents by running\nthe code from Jupyter notebook (``.ipynb``) files.\n\nThis handler is configured with the filename of a Jupyter notebook. When a\nBokeh application calls ``modify_doc``, the code from all the notebook cells\nis collected and executed to process a new Document for a session. 
When the\nnotebook code is executed, the Document being modified will be available as\n``curdoc``, and any optionally provided ``args`` will be available as\n``sys.argv``.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport re\nimport sys\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\n\n# External imports\n\n# Bokeh imports\nfrom ...util.dependencies import import_required\nfrom .code import CodeHandler\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'NotebookHandler',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\nclass NotebookHandler(CodeHandler):\n ''' A Handler that uses code in a Jupyter notebook for modifying Bokeh\n Documents.\n\n '''\n\n _logger_text = \"%s: call to %s() ignored when running notebooks with the 'bokeh' command.\"\n\n _origin = \"Notebook\"\n\n def __init__(self, *args, **kwargs):\n '''\n\n Keywords:\n filename (str) : a path to a Jupyter notebook (\".ipynb\") file\n\n '''\n nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')\n nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')\n\n if 'filename' not in kwargs:\n raise ValueError('Must pass a filename to NotebookHandler')\n\n\n class StripMagicsProcessor(nbconvert.preprocessors.Preprocessor):\n \"\"\"\n Preprocessor to convert notebooks to Python source while stripping\n out all magics (i.e IPython specific syntax).\n \"\"\"\n\n _magic_pattern = re.compile(r'^\\s*(?P<magic>%%\\w\\w+)($|(\\s+))')\n\n def strip_magics(self, source):\n \"\"\"\n Given the source of a cell, filter out all cell and line magics.\n \"\"\"\n filtered=[]\n for line in source.splitlines():\n match = self._magic_pattern.match(line)\n if match is None:\n filtered.append(line)\n else:\n msg = 'Stripping out IPython magic {magic} in code cell {cell}'\n message = msg.format(cell=self._cell_counter, magic=match.group('magic'))\n log.warning(message)\n return '\\n'.join(filtered)\n\n def preprocess_cell(self, cell, resources, index):\n if cell['cell_type'] == 'code':\n self._cell_counter += 1\n cell['source'] = self.strip_magics(cell['source'])\n return cell, resources\n\n def __call__(self, nb, resources):\n self._cell_counter = 0\n return self.preprocess(nb,resources)\n\n preprocessors=[StripMagicsProcessor()]\n filename = kwargs['filename']\n\n with open(filename) as f:\n nb = nbformat.read(f, nbformat.NO_CONVERT)\n exporter = nbconvert.PythonExporter()\n\n for preprocessor in preprocessors:\n exporter.register_preprocessor(preprocessor)\n\n source, _ = exporter.from_notebook_node(nb)\n source = 
source.replace('get_ipython().run_line_magic', '')\n source = source.replace('get_ipython().magic', '')\n\n if sys.version_info.major == 2 and isinstance(source, unicode): # NOQA\n source = source.encode('utf-8')\n kwargs['source'] = source\n\n super(NotebookHandler, self).__init__(*args, **kwargs)\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/application/handlers/notebook.py"}]}
1,896
220
gh_patches_debug_6925
rasdani/github-patches
git_diff
mindsdb__mindsdb-2205
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Result of query with same column names with return different column names Issue exists for any 'internal' tables. If query multiple columns with same name: ``` select 1 as "a", 1 as "a" from information_schema.tables limit 1; ``` then result will be: ``` +------+------+ | a | a_2 | +------+------+ | 1 | 1 | +------+------+ ``` </issue> <code> [start of mindsdb/api/mysql/mysql_proxy/utilities/sql.py] 1 import duckdb 2 import numpy as np 3 from mindsdb_sql import parse_sql 4 from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy 5 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender 6 7 from mindsdb.utilities.log import log 8 9 10 def _remove_table_name(root): 11 if isinstance(root, BinaryOperation): 12 _remove_table_name(root.args[0]) 13 _remove_table_name(root.args[1]) 14 elif isinstance(root, Identifier): 15 root.parts = [root.parts[-1]] 16 17 18 def query_df(df, query): 19 """ Perform simple query ('select' from one table, without subqueries and joins) on DataFrame. 20 21 Args: 22 df (pandas.DataFrame): data 23 query (mindsdb_sql.parser.ast.Select | str): select query 24 25 Returns: 26 pandas.DataFrame 27 """ 28 29 if isinstance(query, str): 30 query_ast = parse_sql(query, dialect='mysql') 31 else: 32 query_ast = query 33 34 if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False: 35 raise Exception("Only 'SELECT from TABLE' statements supported for internal query") 36 37 query_ast.from_table.parts = ['df_table'] 38 for identifier in query_ast.targets: 39 if isinstance(identifier, Identifier): 40 identifier.parts = [identifier.parts[-1]] 41 if isinstance(query_ast.order_by, list): 42 for orderby in query_ast.order_by: 43 if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier): 44 orderby.field.parts = [orderby.field.parts[-1]] 45 _remove_table_name(query_ast.where) 46 47 render = SqlalchemyRender('postgres') 48 try: 49 query_str = render.get_string(query_ast, with_failback=False) 50 except Exception as e: 51 log.error(f"Exception during query casting to 'postgres' dialect. Query: {str(query)}. Error: {e}") 52 query_str = render.get_string(query_ast, with_failback=True) 53 54 res = duckdb.query_df(df, 'df_table', query_str) 55 result_df = res.df() 56 result_df = result_df.replace({np.nan: None}) 57 return result_df 58 [end of mindsdb/api/mysql/mysql_proxy/utilities/sql.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py --- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py +++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py @@ -54,4 +54,13 @@ res = duckdb.query_df(df, 'df_table', query_str) result_df = res.df() result_df = result_df.replace({np.nan: None}) + + new_column_names = {} + real_column_names = [x[0] for x in res.description()] + for i, duck_column_name in enumerate(result_df.columns): + new_column_names[duck_column_name] = real_column_names[i] + result_df = result_df.rename( + new_column_names, + axis='columns' + ) return result_df
{"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n@@ -54,4 +54,13 @@\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.replace({np.nan: None})\n+\n+ new_column_names = {}\n+ real_column_names = [x[0] for x in res.description()]\n+ for i, duck_column_name in enumerate(result_df.columns):\n+ new_column_names[duck_column_name] = real_column_names[i]\n+ result_df = result_df.rename(\n+ new_column_names,\n+ axis='columns'\n+ )\n return result_df\n", "issue": "Result of query with same column names with return different column names\nIssue exists for any 'internal' tables. If query multiple columns with same name:\r\n```\r\nselect 1 as \"a\", 1 as \"a\" from information_schema.tables limit 1;\r\n```\r\nthen result will be:\r\n```\r\n+------+------+\r\n| a | a_2 |\r\n+------+------+\r\n| 1 | 1 |\r\n+------+------+\r\n```\n", "before_files": [{"content": "import duckdb\nimport numpy as np\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.utilities.log import log\n\n\ndef _remove_table_name(root):\n if isinstance(root, BinaryOperation):\n _remove_table_name(root.args[0])\n _remove_table_name(root.args[1])\n elif isinstance(root, Identifier):\n root.parts = [root.parts[-1]]\n\n\ndef query_df(df, query):\n \"\"\" Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.\n\n Args:\n df (pandas.DataFrame): data\n query (mindsdb_sql.parser.ast.Select | str): select query\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n if isinstance(query, str):\n query_ast = parse_sql(query, dialect='mysql')\n else:\n query_ast = query\n\n if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False:\n raise Exception(\"Only 'SELECT from TABLE' statements supported for internal query\")\n\n query_ast.from_table.parts = ['df_table']\n for identifier in query_ast.targets:\n if isinstance(identifier, Identifier):\n identifier.parts = [identifier.parts[-1]]\n if isinstance(query_ast.order_by, list):\n for orderby in query_ast.order_by:\n if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query_ast.where)\n\n render = SqlalchemyRender('postgres')\n try:\n query_str = render.get_string(query_ast, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to 'postgres' dialect. Query: {str(query)}. Error: {e}\")\n query_str = render.get_string(query_ast, with_failback=True)\n\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.replace({np.nan: None})\n return result_df\n", "path": "mindsdb/api/mysql/mysql_proxy/utilities/sql.py"}]}
1,202
190
gh_patches_debug_1190
rasdani/github-patches
git_diff
boto__botocore-1117
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support Python 3.6 Python 3.6 got released, and some distro (like Fedora) are swithcing to it. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import botocore 3 import sys 4 5 from setuptools import setup, find_packages 6 7 8 requires = ['jmespath>=0.7.1,<1.0.0', 9 'python-dateutil>=2.1,<3.0.0', 10 'docutils>=0.10'] 11 12 13 if sys.version_info[:2] == (2, 6): 14 # For python2.6 we have a few other dependencies. 15 # First we need an ordered dictionary so we use the 16 # 2.6 backport. 17 requires.append('ordereddict==1.1') 18 # Then we need simplejson. This is because we need 19 # a json version that allows us to specify we want to 20 # use an ordereddict instead of a normal dict for the 21 # JSON objects. The 2.7 json module has this. For 2.6 22 # we need simplejson. 23 requires.append('simplejson==3.3.0') 24 25 26 setup( 27 name='botocore', 28 version=botocore.__version__, 29 description='Low-level, data-driven core of boto 3.', 30 long_description=open('README.rst').read(), 31 author='Amazon Web Services', 32 url='https://github.com/boto/botocore', 33 scripts=[], 34 packages=find_packages(exclude=['tests*']), 35 package_data={'botocore': ['data/*.json', 'data/*/*.json'], 36 'botocore.vendored.requests': ['*.pem']}, 37 include_package_data=True, 38 install_requires=requires, 39 extras_require={ 40 ':python_version=="2.6"': [ 41 'ordereddict==1.1', 42 'simplejson==3.3.0', 43 ] 44 }, 45 license="Apache License 2.0", 46 classifiers=( 47 'Development Status :: 5 - Production/Stable', 48 'Intended Audience :: Developers', 49 'Intended Audience :: System Administrators', 50 'Natural Language :: English', 51 'License :: OSI Approved :: Apache Software License', 52 'Programming Language :: Python', 53 'Programming Language :: Python :: 2', 54 'Programming Language :: Python :: 2.6', 55 'Programming Language :: Python :: 2.7', 56 'Programming Language :: Python :: 3', 57 'Programming Language :: Python :: 3.3', 58 'Programming Language :: Python :: 3.4', 59 'Programming Language :: Python :: 3.5', 60 ), 61 ) 62 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -57,5 +57,6 @@ 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', ), )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,5 +57,6 @@\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n ),\n )\n", "issue": "Support Python 3.6\nPython 3.6 got released, and some distro (like Fedora) are swithcing to it.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ),\n)\n", "path": "setup.py"}]}
1,211
85
gh_patches_debug_37121
rasdani/github-patches
git_diff
DataBiosphere__toil-902
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Build takes too long </issue> <code> [start of setup.py] 1 # Copyright (C) 2015 UCSC Computational Genomics Lab 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import sys 16 from version import version 17 from setuptools import find_packages, setup 18 19 botoVersionRequired = 'boto==2.38.0' 20 21 kwargs = dict( 22 name='toil', 23 version=version, 24 description='Pipeline management software for clusters.', 25 author='Benedict Paten', 26 author_email='[email protected]', 27 url="https://github.com/BD2KGenomics/toil", 28 install_requires=[ 29 'bd2k-python-lib==1.13.dev14'], 30 tests_require=[ 31 'mock==1.0.1', 32 'pytest==2.8.3'], 33 test_suite='toil', 34 extras_require={ 35 'mesos': [ 36 'psutil==3.0.1'], 37 'aws': [ 38 botoVersionRequired, 39 'cgcloud-lib==1.4a1.dev195' ], 40 'azure': [ 41 'azure==1.0.3'], 42 'encryption': [ 43 'pynacl==0.3.0'], 44 'google': [ 45 'gcs_oauth2_boto_plugin==1.9', 46 botoVersionRequired], 47 'cwl': [ 48 'cwltool==1.0.20160425140546']}, 49 package_dir={'': 'src'}, 50 packages=find_packages('src', exclude=['*.test']), 51 entry_points={ 52 'console_scripts': [ 53 'toil = toil.utils.toilMain:main', 54 '_toil_worker = toil.worker:main', 55 'cwltoil = toil.cwl.cwltoil:main [cwl]', 56 'cwl-runner = toil.cwl.cwltoil:main [cwl]', 57 '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']}) 58 59 from setuptools.command.test import test as TestCommand 60 61 62 class PyTest(TestCommand): 63 user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] 64 65 def initialize_options(self): 66 TestCommand.initialize_options(self) 67 self.pytest_args = [] 68 69 def finalize_options(self): 70 TestCommand.finalize_options(self) 71 self.test_args = [] 72 self.test_suite = True 73 74 def run_tests(self): 75 import pytest 76 # Sanitize command line arguments to avoid confusing Toil code attempting to parse them 77 sys.argv[1:] = [] 78 errno = pytest.main(self.pytest_args) 79 sys.exit(errno) 80 81 82 kwargs['cmdclass'] = {'test': PyTest} 83 84 setup(**kwargs) 85 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -12,13 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys from version import version from setuptools import find_packages, setup -botoVersionRequired = 'boto==2.38.0' +botoRequirement = 'boto==2.38.0' -kwargs = dict( +setup( name='toil', version=version, description='Pipeline management software for clusters.', @@ -27,15 +26,11 @@ url="https://github.com/BD2KGenomics/toil", install_requires=[ 'bd2k-python-lib==1.13.dev14'], - tests_require=[ - 'mock==1.0.1', - 'pytest==2.8.3'], - test_suite='toil', extras_require={ 'mesos': [ 'psutil==3.0.1'], 'aws': [ - botoVersionRequired, + botoRequirement, 'cgcloud-lib==1.4a1.dev195' ], 'azure': [ 'azure==1.0.3'], @@ -43,7 +38,7 @@ 'pynacl==0.3.0'], 'google': [ 'gcs_oauth2_boto_plugin==1.9', - botoVersionRequired], + botoRequirement], 'cwl': [ 'cwltool==1.0.20160425140546']}, package_dir={'': 'src'}, @@ -55,30 +50,3 @@ 'cwltoil = toil.cwl.cwltoil:main [cwl]', 'cwl-runner = toil.cwl.cwltoil:main [cwl]', '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']}) - -from setuptools.command.test import test as TestCommand - - -class PyTest(TestCommand): - user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] - - def initialize_options(self): - TestCommand.initialize_options(self) - self.pytest_args = [] - - def finalize_options(self): - TestCommand.finalize_options(self) - self.test_args = [] - self.test_suite = True - - def run_tests(self): - import pytest - # Sanitize command line arguments to avoid confusing Toil code attempting to parse them - sys.argv[1:] = [] - errno = pytest.main(self.pytest_args) - sys.exit(errno) - - -kwargs['cmdclass'] = {'test': PyTest} - -setup(**kwargs)
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,13 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import sys\n from version import version\n from setuptools import find_packages, setup\n \n-botoVersionRequired = 'boto==2.38.0'\n+botoRequirement = 'boto==2.38.0'\n \n-kwargs = dict(\n+setup(\n name='toil',\n version=version,\n description='Pipeline management software for clusters.',\n@@ -27,15 +26,11 @@\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=[\n 'bd2k-python-lib==1.13.dev14'],\n- tests_require=[\n- 'mock==1.0.1',\n- 'pytest==2.8.3'],\n- test_suite='toil',\n extras_require={\n 'mesos': [\n 'psutil==3.0.1'],\n 'aws': [\n- botoVersionRequired,\n+ botoRequirement,\n 'cgcloud-lib==1.4a1.dev195' ],\n 'azure': [\n 'azure==1.0.3'],\n@@ -43,7 +38,7 @@\n 'pynacl==0.3.0'],\n 'google': [\n 'gcs_oauth2_boto_plugin==1.9',\n- botoVersionRequired],\n+ botoRequirement],\n 'cwl': [\n 'cwltool==1.0.20160425140546']},\n package_dir={'': 'src'},\n@@ -55,30 +50,3 @@\n 'cwltoil = toil.cwl.cwltoil:main [cwl]',\n 'cwl-runner = toil.cwl.cwltoil:main [cwl]',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']})\n-\n-from setuptools.command.test import test as TestCommand\n-\n-\n-class PyTest(TestCommand):\n- user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n-\n- def initialize_options(self):\n- TestCommand.initialize_options(self)\n- self.pytest_args = []\n-\n- def finalize_options(self):\n- TestCommand.finalize_options(self)\n- self.test_args = []\n- self.test_suite = True\n-\n- def run_tests(self):\n- import pytest\n- # Sanitize command line arguments to avoid confusing Toil code attempting to parse them\n- sys.argv[1:] = []\n- errno = pytest.main(self.pytest_args)\n- sys.exit(errno)\n-\n-\n-kwargs['cmdclass'] = {'test': PyTest}\n-\n-setup(**kwargs)\n", "issue": "Build takes too long\n\n", "before_files": [{"content": "# Copyright (C) 2015 UCSC Computational Genomics Lab\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom version import version\nfrom setuptools import find_packages, setup\n\nbotoVersionRequired = 'boto==2.38.0'\n\nkwargs = dict(\n name='toil',\n version=version,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=[\n 'bd2k-python-lib==1.13.dev14'],\n tests_require=[\n 'mock==1.0.1',\n 'pytest==2.8.3'],\n test_suite='toil',\n extras_require={\n 'mesos': [\n 'psutil==3.0.1'],\n 'aws': [\n botoVersionRequired,\n 'cgcloud-lib==1.4a1.dev195' ],\n 'azure': [\n 'azure==1.0.3'],\n 'encryption': [\n 'pynacl==0.3.0'],\n 'google': [\n 'gcs_oauth2_boto_plugin==1.9',\n botoVersionRequired],\n 'cwl': [\n 'cwltool==1.0.20160425140546']},\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['*.test']),\n entry_points={\n 'console_scripts': [\n 'toil = 
toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:main [cwl]',\n 'cwl-runner = toil.cwl.cwltoil:main [cwl]',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']})\n\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n # Sanitize command line arguments to avoid confusing Toil code attempting to parse them\n sys.argv[1:] = []\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nkwargs['cmdclass'] = {'test': PyTest}\n\nsetup(**kwargs)\n", "path": "setup.py"}]}
1,396
629
gh_patches_debug_15829
rasdani/github-patches
git_diff
opsdroid__opsdroid-523
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Setting typing delay to 0 means bot never responds # Description When a typing delay is defined it is an integer which states how many characters per second opsdroid should type. If you set this to `0` then it can type no characters per second and therefore never responds. ## Steps to Reproduce - Configure a connector with a `typing-delay` of `0`. - Talk to the bot ## Experienced Functionality The bot never responds. ## Expected Functionality I would expect the bot to respond eventually. Perhaps even immediately and log an error stating `0` is an invalid response. Thinking a little more about this I wonder if this functionality is the wrong way round. With `thinking-delay` that is the number of seconds that opsdroid should way before responding, so as the number gets bigger the longer opsdroid waits. But with `typing-delay` it is the number of characters per second that opsdroid can type, so the bigger the number the less opsdroid waits. The word `delay` suggests that the higher the number the longer the wait. These opposites could be confusing (it confused me this evening). I think it would be good that both numbers represent time, rather than one representing characters per second. That would involve changing it to be seconds per character. This would result in a bigger number causing a bigger delay. ## Versions - **Opsdroid version:** master - **Python version:** 3.5.4 - **OS/Docker version:** macOS 10.13 ## Configuration File ```yaml connectors: - name: shell typing-delay: 0 skills: - name: hello ``` </issue> <code> [start of opsdroid/message.py] 1 """Class to encapsulate a message.""" 2 3 from datetime import datetime 4 from copy import copy 5 import asyncio 6 from random import randrange 7 8 from opsdroid.helper import get_opsdroid 9 10 11 class Message: 12 # pylint: disable=too-few-public-methods 13 """A message object.""" 14 15 def __init__(self, text, user, room, connector, raw_message=None): 16 """Create object with minimum properties.""" 17 self.created = datetime.now() 18 self.text = text 19 self.user = user 20 self.room = room 21 self.connector = connector 22 self.raw_message = raw_message 23 self.regex = None 24 self.responded_to = False 25 26 async def _thinking_delay(self): 27 """Make opsdroid wait x-seconds before responding.""" 28 seconds = self.connector.configuration.get('thinking-delay', 0) 29 30 if isinstance(seconds, list): 31 seconds = randrange(seconds[0], seconds[1]) 32 33 await asyncio.sleep(seconds) 34 35 async def _typing_delay(self, text): 36 """Simulate typing, takes an int(characters per second typed).""" 37 try: 38 char_per_sec = self.connector.configuration['typing-delay'] 39 char_count = len(text) 40 await asyncio.sleep(char_count//char_per_sec) 41 except KeyError: 42 pass 43 44 async def respond(self, text, room=None): 45 """Respond to this message using the connector it was created by.""" 46 opsdroid = get_opsdroid() 47 response = copy(self) 48 response.text = text 49 50 if 'thinking-delay' in self.connector.configuration or \ 51 'typing-delay' in self.connector.configuration: 52 await self._thinking_delay() 53 await self._typing_delay(response.text) 54 55 await self.connector.respond(response, room) 56 if not self.responded_to: 57 now = datetime.now() 58 opsdroid.stats["total_responses"] = \ 59 opsdroid.stats["total_responses"] + 1 60 opsdroid.stats["total_response_time"] = \ 61 opsdroid.stats["total_response_time"] + \ 62 (now - 
self.created).total_seconds() 63 self.responded_to = True 64 65 async def react(self, emoji): 66 """React to this message using the connector it was created by.""" 67 if 'thinking-delay' in self.connector.configuration: 68 await self._thinking_delay() 69 return await self.connector.react(self, emoji) 70 [end of opsdroid/message.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opsdroid/message.py b/opsdroid/message.py --- a/opsdroid/message.py +++ b/opsdroid/message.py @@ -33,13 +33,14 @@ await asyncio.sleep(seconds) async def _typing_delay(self, text): - """Simulate typing, takes an int(characters per second typed).""" - try: - char_per_sec = self.connector.configuration['typing-delay'] - char_count = len(text) - await asyncio.sleep(char_count//char_per_sec) - except KeyError: - pass + """Simulate typing, takes an int or float to delay reply.""" + seconds = self.connector.configuration.get('typing-delay', 0) + char_count = len(text) + + if isinstance(seconds, list): + seconds = randrange(seconds[0], seconds[1]) + + await asyncio.sleep(char_count*seconds) async def respond(self, text, room=None): """Respond to this message using the connector it was created by."""
{"golden_diff": "diff --git a/opsdroid/message.py b/opsdroid/message.py\n--- a/opsdroid/message.py\n+++ b/opsdroid/message.py\n@@ -33,13 +33,14 @@\n await asyncio.sleep(seconds)\n \n async def _typing_delay(self, text):\n- \"\"\"Simulate typing, takes an int(characters per second typed).\"\"\"\n- try:\n- char_per_sec = self.connector.configuration['typing-delay']\n- char_count = len(text)\n- await asyncio.sleep(char_count//char_per_sec)\n- except KeyError:\n- pass\n+ \"\"\"Simulate typing, takes an int or float to delay reply.\"\"\"\n+ seconds = self.connector.configuration.get('typing-delay', 0)\n+ char_count = len(text)\n+\n+ if isinstance(seconds, list):\n+ seconds = randrange(seconds[0], seconds[1])\n+\n+ await asyncio.sleep(char_count*seconds)\n \n async def respond(self, text, room=None):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n", "issue": "Setting typing delay to 0 means bot never responds\n# Description\r\nWhen a typing delay is defined it is an integer which states how many characters per second opsdroid should type. If you set this to `0` then it can type no characters per second and therefore never responds.\r\n\r\n\r\n## Steps to Reproduce\r\n- Configure a connector with a `typing-delay` of `0`. \r\n- Talk to the bot\r\n\r\n\r\n## Experienced Functionality\r\nThe bot never responds.\r\n\r\n\r\n## Expected Functionality\r\nI would expect the bot to respond eventually. Perhaps even immediately and log an error stating `0` is an invalid response.\r\n\r\nThinking a little more about this I wonder if this functionality is the wrong way round. With `thinking-delay` that is the number of seconds that opsdroid should way before responding, so as the number gets bigger the longer opsdroid waits. But with `typing-delay` it is the number of characters per second that opsdroid can type, so the bigger the number the less opsdroid waits. The word `delay` suggests that the higher the number the longer the wait.\r\n\r\nThese opposites could be confusing (it confused me this evening). I think it would be good that both numbers represent time, rather than one representing characters per second. That would involve changing it to be seconds per character. This would result in a bigger number causing a bigger delay. 
\r\n\r\n\r\n## Versions\r\n- **Opsdroid version:** master\r\n- **Python version:** 3.5.4\r\n- **OS/Docker version:** macOS 10.13\r\n\r\n## Configuration File\r\n\r\n\r\n```yaml\r\nconnectors:\r\n - name: shell\r\n typing-delay: 0\r\n\r\nskills:\r\n - name: hello\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Class to encapsulate a message.\"\"\"\n\nfrom datetime import datetime\nfrom copy import copy\nimport asyncio\nfrom random import randrange\n\nfrom opsdroid.helper import get_opsdroid\n\n\nclass Message:\n # pylint: disable=too-few-public-methods\n \"\"\"A message object.\"\"\"\n\n def __init__(self, text, user, room, connector, raw_message=None):\n \"\"\"Create object with minimum properties.\"\"\"\n self.created = datetime.now()\n self.text = text\n self.user = user\n self.room = room\n self.connector = connector\n self.raw_message = raw_message\n self.regex = None\n self.responded_to = False\n\n async def _thinking_delay(self):\n \"\"\"Make opsdroid wait x-seconds before responding.\"\"\"\n seconds = self.connector.configuration.get('thinking-delay', 0)\n\n if isinstance(seconds, list):\n seconds = randrange(seconds[0], seconds[1])\n\n await asyncio.sleep(seconds)\n\n async def _typing_delay(self, text):\n \"\"\"Simulate typing, takes an int(characters per second typed).\"\"\"\n try:\n char_per_sec = self.connector.configuration['typing-delay']\n char_count = len(text)\n await asyncio.sleep(char_count//char_per_sec)\n except KeyError:\n pass\n\n async def respond(self, text, room=None):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n opsdroid = get_opsdroid()\n response = copy(self)\n response.text = text\n\n if 'thinking-delay' in self.connector.configuration or \\\n 'typing-delay' in self.connector.configuration:\n await self._thinking_delay()\n await self._typing_delay(response.text)\n\n await self.connector.respond(response, room)\n if not self.responded_to:\n now = datetime.now()\n opsdroid.stats[\"total_responses\"] = \\\n opsdroid.stats[\"total_responses\"] + 1\n opsdroid.stats[\"total_response_time\"] = \\\n opsdroid.stats[\"total_response_time\"] + \\\n (now - self.created).total_seconds()\n self.responded_to = True\n\n async def react(self, emoji):\n \"\"\"React to this message using the connector it was created by.\"\"\"\n if 'thinking-delay' in self.connector.configuration:\n await self._thinking_delay()\n return await self.connector.react(self, emoji)\n", "path": "opsdroid/message.py"}]}
num_tokens_prompt: 1,528
num_tokens_diff: 230
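The `_typing_delay` fix recorded above makes the configured value mean seconds per typed character, so `0` simply yields no delay. A standalone sketch of that behaviour (the coroutine name and defaults here are assumptions, not opsdroid's API):

```python
import asyncio
from random import randrange

async def typing_delay(text: str, seconds=0) -> None:
    # 'seconds' is the per-character delay; a [low, high] list picks a random value
    if isinstance(seconds, list):
        seconds = randrange(seconds[0], seconds[1])
    await asyncio.sleep(len(text) * seconds)

# A delay of 0 now means "reply immediately" instead of never responding.
asyncio.run(typing_delay("hello world", 0))
```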
problem_id: gh_patches_debug_27031
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-cloudformation__cfn-lint-2927
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `I3037` false positives in `AWS::ECS::TaskDefinition.ContainerDefinitions.Command` ### CloudFormation Lint Version 0.83.0 ### What operating system are you using? Mac ### Describe the bug `I3037` issues (*List has a duplicate value*) are reported if the command specified in `AWS::ECS::TaskDefinition.ContainerDefinitions.Command` has repeating entries, e.g. the values of several command arguments are the same. ### Expected behavior No issue is detected. ### Reproduction template ```json { "AWSTemplateFormatVersion": "2010-09-09", "Description": "This template deploys an ECS task definition.", "Resources": { "MyECSTaskDefinition": { "Type": "AWS::ECS::TaskDefinition", "Properties": { "ContainerDefinitions": [ { "Command": [ "do_something", "--foo", "1", "--bar", "1" ], "Image": "my-image", "Name": "my-task" } ] } } } } ``` </issue> <code> [start of src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import hashlib 6 import json 7 8 from cfnlint.helpers import RESOURCE_SPECS 9 from cfnlint.rules import CloudFormationLintRule, RuleMatch 10 11 12 class ListDuplicatesAllowed(CloudFormationLintRule): 13 """Check if duplicates exist in a List""" 14 15 id = "I3037" 16 shortdesc = "Check if a list that allows duplicates has any duplicates" 17 description = ( 18 "Certain lists support duplicate items." 19 "Provide an alert when list of strings or numbers have repeats." 20 ) 21 source_url = "https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1" 22 tags = ["resources", "property", "list"] 23 24 def initialize(self, cfn): 25 """Initialize the rule""" 26 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get( 27 "ResourceTypes" 28 ): 29 self.resource_property_types.append(resource_type_spec) 30 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get( 31 "PropertyTypes" 32 ): 33 self.resource_sub_property_types.append(property_type_spec) 34 35 def _check_duplicates(self, values, path, scenario=None): 36 """Check for Duplicates""" 37 matches = [] 38 39 list_items = [] 40 if isinstance(values, list): 41 for index, value in enumerate(values): 42 value_hash = hashlib.sha1( 43 json.dumps(value, sort_keys=True).encode("utf-8") 44 ).hexdigest() 45 if value_hash in list_items: 46 if not scenario: 47 message = "List has a duplicate value at {0}" 48 matches.append( 49 RuleMatch( 50 path + [index], 51 message.format("/".join(map(str, path + [index]))), 52 ) 53 ) 54 else: 55 scenario_text = " and ".join( 56 [f'condition "{k}" is {v}' for (k, v) in scenario.items()] 57 ) 58 message = "List has a duplicate value at {0} when {1}" 59 matches.append( 60 RuleMatch( 61 path, 62 message.format("/".join(map(str, path)), scenario_text), 63 ) 64 ) 65 66 list_items.append(value_hash) 67 68 return matches 69 70 def check_duplicates(self, values, path, cfn): 71 """Check for duplicates""" 72 matches = [] 73 74 if isinstance(values, list): 75 matches.extend(self._check_duplicates(values, path)) 76 elif isinstance(values, dict): 77 props = cfn.get_object_without_conditions(values) 78 for prop in props: 79 matches.extend( 80 self._check_duplicates( 81 prop.get("Object"), path, prop.get("Scenario") 82 ) 83 ) 84 85 return matches 86 87 def check(self, cfn, properties, value_specs, path): 88 """Check itself""" 89 matches = [] 90 for p_value, p_path in 
properties.items_safe(path[:]): 91 for prop in p_value: 92 if prop in value_specs: 93 property_type = value_specs.get(prop).get("Type") 94 primitive_type = value_specs.get(prop).get("PrimitiveItemType") 95 duplicates_allowed = value_specs.get(prop).get( 96 "DuplicatesAllowed", False 97 ) 98 if ( 99 property_type == "List" 100 and duplicates_allowed 101 and primitive_type in ["String", "Integer"] 102 ): 103 matches.extend( 104 self.check_duplicates(p_value[prop], p_path + [prop], cfn) 105 ) 106 107 return matches 108 109 def match_resource_sub_properties(self, properties, property_type, path, cfn): 110 """Match for sub properties""" 111 matches = [] 112 113 specs = ( 114 RESOURCE_SPECS.get(cfn.regions[0]) 115 .get("PropertyTypes") 116 .get(property_type, {}) 117 .get("Properties", {}) 118 ) 119 matches.extend(self.check(cfn, properties, specs, path)) 120 121 return matches 122 123 def match_resource_properties(self, properties, resource_type, path, cfn): 124 """Check CloudFormation Properties""" 125 matches = [] 126 127 specs = ( 128 RESOURCE_SPECS.get(cfn.regions[0]) 129 .get("ResourceTypes") 130 .get(resource_type, {}) 131 .get("Properties", {}) 132 ) 133 matches.extend(self.check(cfn, properties, specs, path)) 134 135 return matches 136 [end of src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py b/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py --- a/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py +++ b/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py @@ -21,6 +21,10 @@ source_url = "https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1" tags = ["resources", "property", "list"] + def __init__(self): + super().__init__() + self.exceptions = ["Command"] + def initialize(self, cfn): """Initialize the rule""" for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get( @@ -71,11 +75,15 @@ """Check for duplicates""" matches = [] + if path[-1] in self.exceptions: + return matches if isinstance(values, list): matches.extend(self._check_duplicates(values, path)) elif isinstance(values, dict): props = cfn.get_object_without_conditions(values) for prop in props: + if prop in self.exceptions: + continue matches.extend( self._check_duplicates( prop.get("Object"), path, prop.get("Scenario")
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py b/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py\n--- a/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py\n+++ b/src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py\n@@ -21,6 +21,10 @@\n source_url = \"https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1\"\n tags = [\"resources\", \"property\", \"list\"]\n \n+ def __init__(self):\n+ super().__init__()\n+ self.exceptions = [\"Command\"]\n+\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n@@ -71,11 +75,15 @@\n \"\"\"Check for duplicates\"\"\"\n matches = []\n \n+ if path[-1] in self.exceptions:\n+ return matches\n if isinstance(values, list):\n matches.extend(self._check_duplicates(values, path))\n elif isinstance(values, dict):\n props = cfn.get_object_without_conditions(values)\n for prop in props:\n+ if prop in self.exceptions:\n+ continue\n matches.extend(\n self._check_duplicates(\n prop.get(\"Object\"), path, prop.get(\"Scenario\")\n", "issue": "`I3037` false positives in `AWS::ECS::TaskDefinition.ContainerDefinitions.Command`\n### CloudFormation Lint Version\n\n0.83.0\n\n### What operating system are you using?\n\nMac\n\n### Describe the bug\n\n`I3037` issues (*List has a duplicate value*) are reported if the command specified in `AWS::ECS::TaskDefinition.ContainerDefinitions.Command` has repeating entries, e.g. the values of several command arguments are the same.\n\n### Expected behavior\n\nNo issue is detected.\n\n### Reproduction template\n\n```json\r\n{\r\n \"AWSTemplateFormatVersion\": \"2010-09-09\",\r\n \"Description\": \"This template deploys an ECS task definition.\",\r\n \"Resources\": {\r\n \"MyECSTaskDefinition\": {\r\n \"Type\": \"AWS::ECS::TaskDefinition\",\r\n \"Properties\": {\r\n \"ContainerDefinitions\": [\r\n {\r\n \"Command\": [\r\n \"do_something\",\r\n \"--foo\",\r\n \"1\",\r\n \"--bar\",\r\n \"1\"\r\n ],\r\n \"Image\": \"my-image\",\r\n \"Name\": \"my-task\"\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n}\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport hashlib\nimport json\n\nfrom cfnlint.helpers import RESOURCE_SPECS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass ListDuplicatesAllowed(CloudFormationLintRule):\n \"\"\"Check if duplicates exist in a List\"\"\"\n\n id = \"I3037\"\n shortdesc = \"Check if a list that allows duplicates has any duplicates\"\n description = (\n \"Certain lists support duplicate items.\"\n \"Provide an alert when list of strings or numbers have repeats.\"\n )\n source_url = \"https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/rules.md#rules-1\"\n tags = [\"resources\", \"property\", \"list\"]\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n \"ResourceTypes\"\n ):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n \"PropertyTypes\"\n ):\n self.resource_sub_property_types.append(property_type_spec)\n\n def _check_duplicates(self, values, path, scenario=None):\n \"\"\"Check for Duplicates\"\"\"\n matches = []\n\n list_items = []\n if isinstance(values, list):\n for index, value in enumerate(values):\n value_hash = hashlib.sha1(\n json.dumps(value, sort_keys=True).encode(\"utf-8\")\n ).hexdigest()\n if value_hash in list_items:\n if not scenario:\n message = \"List has a duplicate value at {0}\"\n matches.append(\n RuleMatch(\n path + [index],\n message.format(\"/\".join(map(str, path + [index]))),\n )\n )\n else:\n scenario_text = \" and \".join(\n [f'condition \"{k}\" is {v}' for (k, v) in scenario.items()]\n )\n message = \"List has a duplicate value at {0} when {1}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\"/\".join(map(str, path)), scenario_text),\n )\n )\n\n list_items.append(value_hash)\n\n return matches\n\n def check_duplicates(self, values, path, cfn):\n \"\"\"Check for duplicates\"\"\"\n matches = []\n\n if isinstance(values, list):\n matches.extend(self._check_duplicates(values, path))\n elif isinstance(values, dict):\n props = cfn.get_object_without_conditions(values)\n for prop in props:\n matches.extend(\n self._check_duplicates(\n prop.get(\"Object\"), path, prop.get(\"Scenario\")\n )\n )\n\n return matches\n\n def check(self, cfn, properties, value_specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n property_type = value_specs.get(prop).get(\"Type\")\n primitive_type = value_specs.get(prop).get(\"PrimitiveItemType\")\n duplicates_allowed = value_specs.get(prop).get(\n \"DuplicatesAllowed\", False\n )\n if (\n property_type == \"List\"\n and duplicates_allowed\n and primitive_type in [\"String\", \"Integer\"]\n ):\n matches.extend(\n self.check_duplicates(p_value[prop], p_path + [prop], cfn)\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"PropertyTypes\")\n .get(property_type, {})\n .get(\"Properties\", {})\n )\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"ResourceTypes\")\n .get(resource_type, {})\n 
.get(\"Properties\", {})\n )\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ListDuplicatesAllowed.py"}]}
num_tokens_prompt: 2,022
num_tokens_diff: 288
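The I3037 fix above exempts `Command` from the duplicate check while still hashing list items the same way; reduced to its core (the exception set and function name are assumptions, not cfn-lint's API):

```python
import hashlib
import json

EXCEPTIONS = {"Command"}  # properties where repeated list items are legitimate

def duplicate_indexes(prop_name, values):
    # Hash each item, as the rule does, and report indexes of repeats.
    if prop_name in EXCEPTIONS:
        return []
    seen, repeats = set(), []
    for index, value in enumerate(values):
        digest = hashlib.sha1(
            json.dumps(value, sort_keys=True).encode("utf-8")
        ).hexdigest()
        if digest in seen:
            repeats.append(index)
        seen.add(digest)
    return repeats

print(duplicate_indexes("Command", ["do_something", "--foo", "1", "--bar", "1"]))  # []
print(duplicate_indexes("Subnets", ["subnet-a", "subnet-a"]))                      # [1]
```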
problem_id: gh_patches_debug_54191
source: rasdani/github-patches
task_type: git_diff
in_source_id: vyperlang__vyper-293
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need a .travis.yml file to properly run travis tests ^ </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 3 from setuptools import setup, find_packages 4 5 6 with open('README.md') as f: 7 readme = f.read() 8 9 with open('LICENSE') as f: 10 license = f.read() 11 12 setup( 13 name='viper', 14 version='0.0.1', 15 description='Viper Programming Language for Ethereum', 16 long_description=readme, 17 author='Vitalik Buterin', 18 author_email='', 19 url='https://github.com/ethereum/viper', 20 license=license, 21 packages=find_packages(exclude=('tests', 'docs')), 22 install_requires=[ 23 'ethereum == 1.3.7', 24 'serpent', 25 'pytest-cov', 26 'pytest-runner', # Must be after pytest-cov or it will not work 27 # due to https://github.com/pypa/setuptools/issues/196 28 ], 29 scripts=['bin/viper'] 30 ) 31 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ license=license, packages=find_packages(exclude=('tests', 'docs')), install_requires=[ - 'ethereum == 1.3.7', + 'ethereum==2.0.4', 'serpent', 'pytest-cov', 'pytest-runner', # Must be after pytest-cov or it will not work
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n license=license,\n packages=find_packages(exclude=('tests', 'docs')),\n install_requires=[\n- 'ethereum == 1.3.7',\n+ 'ethereum==2.0.4',\n 'serpent',\n 'pytest-cov',\n 'pytest-runner', # Must be after pytest-cov or it will not work\n", "issue": "Need a .travis.yml file to properly run travis tests\n^\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='viper',\n version='0.0.1',\n description='Viper Programming Language for Ethereum',\n long_description=readme,\n author='Vitalik Buterin',\n author_email='',\n url='https://github.com/ethereum/viper',\n license=license,\n packages=find_packages(exclude=('tests', 'docs')),\n install_requires=[\n 'ethereum == 1.3.7',\n 'serpent',\n 'pytest-cov',\n 'pytest-runner', # Must be after pytest-cov or it will not work\n # due to https://github.com/pypa/setuptools/issues/196\n ],\n scripts=['bin/viper']\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 798
num_tokens_diff: 106
problem_id: gh_patches_debug_16056
source: rasdani/github-patches
task_type: git_diff
in_source_id: Zeroto521__my-data-toolkit-835
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUG: `geocentroid`, coordinates should multiply weights <!-- Thanks for contributing a pull request! Please follow these standard acronyms to start the commit message: - ENH: enhancement - BUG: bug fix - DOC: documentation - TYP: type annotations - TST: addition or modification of tests - MAINT: maintenance commit (refactoring, typos, etc.) - BLD: change related to building - REL: related to releasing - API: an (incompatible) API change - DEP: deprecate something, or remove a deprecated object - DEV: development tool or utility - REV: revert an earlier commit - PERF: performance improvement - BOT: always commit via a bot - CI: related to CI or CD - CLN: Code cleanup --> - [x] closes #833 - [ ] whatsnew entry </issue> <code> [start of dtoolkit/geoaccessor/geoseries/geocentroid.py] 1 import geopandas as gpd 2 import numpy as np 3 import pandas as pd 4 from shapely import Point 5 6 from dtoolkit.geoaccessor.geoseries.geodistance import geodistance 7 from dtoolkit.geoaccessor.geoseries.xy import xy 8 from dtoolkit.geoaccessor.register import register_geoseries_method 9 10 11 @register_geoseries_method 12 def geocentroid( 13 s: gpd.GeoSeries, 14 /, 15 weights: pd.Series = None, 16 max_iter: int = 300, 17 tol: float = 1e-5, 18 ) -> Point: 19 r""" 20 Return the centroid of all points via the center of gravity method. 21 22 .. math:: 23 24 \left\{\begin{matrix} 25 d_i &=& D(P(\bar{x}_n, \bar{y}_n), P(x_i, y_i)) \\ 26 \bar{x}_0 &=& \frac{\sum w_i x_i}{\sum w_i} \\ 27 \bar{y}_0 &=& \frac{\sum w_i y_i}{\sum w_i} \\ 28 \bar{x}_{n+1} &=& \frac{\sum w_i x_i / d_i}{\sum w_i / d_i} \\ 29 \bar{y}_{n+1} &=& \frac{\sum w_i y_i / d_i}{\sum w_i / d_i} \\ 30 \end{matrix}\right. 31 32 Parameters 33 ---------- 34 weights : Hashable or 1d array-like, optional 35 - None : All weights will be set to 1. 36 - Hashable : Only for DataFrame, the column name. 37 - 1d array-like : The weights of each point. 38 39 max_iter : int, default 300 40 Maximum number of iterations to perform. 41 42 tol : float, default 1e-5 43 Tolerance for convergence. 44 45 Returns 46 ------- 47 Point 48 49 See Also 50 -------- 51 geopandas.GeoSeries.centroid 52 dtoolkit.geoaccessor.geoseries.geocentroid 53 dtoolkit.geoaccessor.geodataframe.geocentroid 54 55 Examples 56 -------- 57 >>> import dtoolkit.geoaccessor 58 >>> import geopandas as gpd 59 >>> from shapely import Point 60 >>> df = gpd.GeoDataFrame( 61 ... { 62 ... "weights": [1, 2, 3], 63 ... "geometry": [Point(100, 32), Point(120, 50), Point(122, 55)], 64 ... }, 65 ... crs=4326, 66 ... ) 67 >>> df 68 weights geometry 69 0 1 POINT (100.00000 32.00000) 70 1 2 POINT (120.00000 50.00000) 71 2 3 POINT (122.00000 55.00000) 72 >>> df.geocentroid() 73 <POINT (120 50)> 74 75 Set weights for each point. 
76 77 >>> df.geocentroid("weights") 78 <POINT (121.999 54.999)> 79 >>> df.geocentroid([1, 2, 3]) 80 <POINT (121.999 54.999)> 81 """ 82 83 weights = np.asarray(weights) if weights is not None else 1 84 coord = xy(s) 85 X = coord.mul(weights, axis=0).mean() 86 for _ in range(max_iter): 87 dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0) 88 Xt = coord.mul(dis, axis=0).sum() / dis.sum() 89 90 if ((X - Xt).abs() <= tol).all(): 91 X = Xt 92 break 93 94 X = Xt 95 96 return Point(*X.tolist()) 97 [end of dtoolkit/geoaccessor/geoseries/geocentroid.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dtoolkit/geoaccessor/geoseries/geocentroid.py b/dtoolkit/geoaccessor/geoseries/geocentroid.py --- a/dtoolkit/geoaccessor/geoseries/geocentroid.py +++ b/dtoolkit/geoaccessor/geoseries/geocentroid.py @@ -80,11 +80,15 @@ <POINT (121.999 54.999)> """ - weights = np.asarray(weights) if weights is not None else 1 coord = xy(s) + if len(coord) == 1: + return Point(coord.iloc[0]) + + weights = np.asarray(weights) if weights is not None else 1 X = coord.mul(weights, axis=0).mean() + for _ in range(max_iter): - dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0) + dis = geodistance(s, Point(X)).rdiv(1).mul(weights, axis=0) Xt = coord.mul(dis, axis=0).sum() / dis.sum() if ((X - Xt).abs() <= tol).all(): @@ -93,4 +97,4 @@ X = Xt - return Point(*X.tolist()) + return Point(X)
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/geoseries/geocentroid.py b/dtoolkit/geoaccessor/geoseries/geocentroid.py\n--- a/dtoolkit/geoaccessor/geoseries/geocentroid.py\n+++ b/dtoolkit/geoaccessor/geoseries/geocentroid.py\n@@ -80,11 +80,15 @@\n <POINT (121.999 54.999)>\n \"\"\"\n \n- weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n+ if len(coord) == 1:\n+ return Point(coord.iloc[0])\n+\n+ weights = np.asarray(weights) if weights is not None else 1\n X = coord.mul(weights, axis=0).mean()\n+\n for _ in range(max_iter):\n- dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)\n+ dis = geodistance(s, Point(X)).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n \n if ((X - Xt).abs() <= tol).all():\n@@ -93,4 +97,4 @@\n \n X = Xt\n \n- return Point(*X.tolist())\n+ return Point(X)\n", "issue": "BUG: `geocentroid`, coordinates should multiply weights\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes #833\r\n- [ ] whatsnew entry\r\n\n", "before_files": [{"content": "import geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom shapely import Point\n\nfrom dtoolkit.geoaccessor.geoseries.geodistance import geodistance\nfrom dtoolkit.geoaccessor.geoseries.xy import xy\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef geocentroid(\n s: gpd.GeoSeries,\n /,\n weights: pd.Series = None,\n max_iter: int = 300,\n tol: float = 1e-5,\n) -> Point:\n r\"\"\"\n Return the centroid of all points via the center of gravity method.\n\n .. math::\n\n \\left\\{\\begin{matrix}\n d_i &=& D(P(\\bar{x}_n, \\bar{y}_n), P(x_i, y_i)) \\\\\n \\bar{x}_0 &=& \\frac{\\sum w_i x_i}{\\sum w_i} \\\\\n \\bar{y}_0 &=& \\frac{\\sum w_i y_i}{\\sum w_i} \\\\\n \\bar{x}_{n+1} &=& \\frac{\\sum w_i x_i / d_i}{\\sum w_i / d_i} \\\\\n \\bar{y}_{n+1} &=& \\frac{\\sum w_i y_i / d_i}{\\sum w_i / d_i} \\\\\n \\end{matrix}\\right.\n\n Parameters\n ----------\n weights : Hashable or 1d array-like, optional\n - None : All weights will be set to 1.\n - Hashable : Only for DataFrame, the column name.\n - 1d array-like : The weights of each point.\n\n max_iter : int, default 300\n Maximum number of iterations to perform.\n\n tol : float, default 1e-5\n Tolerance for convergence.\n\n Returns\n -------\n Point\n\n See Also\n --------\n geopandas.GeoSeries.centroid\n dtoolkit.geoaccessor.geoseries.geocentroid\n dtoolkit.geoaccessor.geodataframe.geocentroid\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely import Point\n >>> df = gpd.GeoDataFrame(\n ... {\n ... \"weights\": [1, 2, 3],\n ... \"geometry\": [Point(100, 32), Point(120, 50), Point(122, 55)],\n ... },\n ... crs=4326,\n ... 
)\n >>> df\n weights geometry\n 0 1 POINT (100.00000 32.00000)\n 1 2 POINT (120.00000 50.00000)\n 2 3 POINT (122.00000 55.00000)\n >>> df.geocentroid()\n <POINT (120 50)>\n\n Set weights for each point.\n\n >>> df.geocentroid(\"weights\")\n <POINT (121.999 54.999)>\n >>> df.geocentroid([1, 2, 3])\n <POINT (121.999 54.999)>\n \"\"\"\n\n weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n X = coord.mul(weights, axis=0).mean()\n for _ in range(max_iter):\n dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n\n if ((X - Xt).abs() <= tol).all():\n X = Xt\n break\n\n X = Xt\n\n return Point(*X.tolist())\n", "path": "dtoolkit/geoaccessor/geoseries/geocentroid.py"}]}
num_tokens_prompt: 1,836
num_tokens_diff: 305
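The `geocentroid` record above iterates the weighted scheme spelled out in its docstring; the same update written in planar NumPy form (a simplification — the real method works on GeoSeries objects with geodesic distances):

```python
import numpy as np

def weighted_center(points, weights, max_iter=300, tol=1e-5):
    pts = np.asarray(points, dtype=float)
    w = np.asarray(weights, dtype=float)
    x = (w[:, None] * pts).sum(axis=0) / w.sum()   # weighted mean as the start point
    for _ in range(max_iter):
        d = np.linalg.norm(pts - x, axis=1)
        d = np.where(d == 0, 1e-12, d)             # guard against a zero distance
        wd = w / d
        x_new = (wd[:, None] * pts).sum(axis=0) / wd.sum()
        if np.all(np.abs(x_new - x) <= tol):
            return x_new
        x = x_new
    return x

# Converges close to the most heavily weighted point.
print(weighted_center([(100, 32), (120, 50), (122, 55)], [1, 2, 3]))
```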
problem_id: gh_patches_debug_40690
source: rasdani/github-patches
task_type: git_diff
in_source_id: hedyorg__hedy-1379
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add level 2 to hedy_translation.py Now that the keywords from level 1 can be translated, new levels can be added. </issue> <code> [start of hedy_translation.py] 1 from lark import Transformer, Tree 2 from hedy import get_keywords_for_language, ExtractAST, get_parser 3 4 5 TRANSPILER_LOOKUP = {} 6 7 8 def keywords_to_dict(to_lang="nl"): 9 """"Return a dictionary of keywords from language of choice. Key is english value is lang of choice""" 10 keywords = {} 11 keywords_from = get_keywords_for_language("en").replace("\n\n", "\n").splitlines() 12 13 keywords_to = get_keywords_for_language(to_lang).replace("\n\n", "\n").splitlines() 14 keywords_from_withoutlvl = [] 15 for line in keywords_from: 16 if line[0] != '/': 17 keywords_from_withoutlvl.append(line) 18 19 keywords_to_withoutlvl = [] 20 for line in keywords_to: 21 if line[0] != '/': 22 keywords_to_withoutlvl.append(line) 23 24 for line in range(len(keywords_from_withoutlvl)): 25 keywords[(keywords_from_withoutlvl[line].split('"'))[1]] = keywords_to_withoutlvl[line].split('"')[1] 26 27 return keywords 28 29 30 def translate_keywords(input_string, from_lang="nl", to_lang="nl", level=1): 31 """"Return code with keywords translated to language of choice in level of choice""" 32 parser = get_parser(level, from_lang) 33 34 punctuation_symbols = ['!', '?', '.'] 35 36 keywordDict = keywords_to_dict(to_lang) 37 program_root = parser.parse(input_string + '\n').children[0] 38 abstract_syntaxtree = ExtractAST().transform(program_root) 39 translator = TRANSPILER_LOOKUP[level] 40 abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root) 41 42 return abstract_syntaxtree 43 44 45 def hedy_translator(level): 46 def decorating(c): 47 TRANSPILER_LOOKUP[level] = c 48 c.level = level 49 return c 50 51 return decorating 52 53 54 @hedy_translator(level=1) 55 class ConvertToLang1(Transformer): 56 57 def __init__(self, keywords, punctuation_symbols): 58 self.keywords = keywords 59 self.punctuation_symbols = punctuation_symbols 60 __class__.level = 1 61 62 def command(self, args): 63 return args[0] 64 65 def program(self, args): 66 return '\n'.join([str(c) for c in args]) 67 68 def text(self, args): 69 return ''.join([str(c) for c in args]) 70 71 def invalid_space(self, args): 72 return " " + ''.join([str(c) for c in args]) 73 74 def print(self, args): 75 return self.keywords["print"] + " " + "".join([str(c) for c in args]) 76 77 def echo(self, args): 78 all_args = self.keywords["echo"] 79 if args: 80 all_args += " " 81 return all_args + "".join([str(c) for c in args]) 82 83 def ask(self, args): 84 return self.keywords["ask"] + " " + "".join([str(c) for c in args]) 85 86 def turn(self, args): 87 return self.keywords["turn"] + " " + "".join([str(c) for c in args]) 88 89 def forward(self, args): 90 return self.keywords["forward"] + " " + "".join([str(c) for c in args]) 91 92 def random(self, args): 93 return self.keywords["random"] + "".join([str(c) for c in args]) 94 95 def invalid(self, args): 96 return ' '.join([str(c) for c in args]) 97 98 def __default__(self, data, children, meta): 99 return Tree(data, children, meta) 100 [end of hedy_translation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hedy_translation.py b/hedy_translation.py --- a/hedy_translation.py +++ b/hedy_translation.py @@ -1,5 +1,5 @@ from lark import Transformer, Tree -from hedy import get_keywords_for_language, ExtractAST, get_parser +import hedy TRANSPILER_LOOKUP = {} @@ -8,9 +8,9 @@ def keywords_to_dict(to_lang="nl"): """"Return a dictionary of keywords from language of choice. Key is english value is lang of choice""" keywords = {} - keywords_from = get_keywords_for_language("en").replace("\n\n", "\n").splitlines() + keywords_from = hedy.get_keywords_for_language("en").replace("\n\n", "\n").splitlines() - keywords_to = get_keywords_for_language(to_lang).replace("\n\n", "\n").splitlines() + keywords_to = hedy.get_keywords_for_language(to_lang).replace("\n\n", "\n").splitlines() keywords_from_withoutlvl = [] for line in keywords_from: if line[0] != '/': @@ -29,13 +29,13 @@ def translate_keywords(input_string, from_lang="nl", to_lang="nl", level=1): """"Return code with keywords translated to language of choice in level of choice""" - parser = get_parser(level, from_lang) + parser = hedy.get_parser(level, from_lang) punctuation_symbols = ['!', '?', '.'] keywordDict = keywords_to_dict(to_lang) program_root = parser.parse(input_string + '\n').children[0] - abstract_syntaxtree = ExtractAST().transform(program_root) + abstract_syntaxtree = hedy.ExtractAST().transform(program_root) translator = TRANSPILER_LOOKUP[level] abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root) @@ -97,3 +97,50 @@ def __default__(self, data, children, meta): return Tree(data, children, meta) + +@hedy_translator(level=2) +class ConvertToLang2(ConvertToLang1): + + def assign(self, args): + return args[0] + " " + self.keywords["is"] + " " + ''.join([str(c) for c in args[1:]]) + + def print(self, args): + + argument_string = "" + i = 0 + + for argument in args: + # escape quotes if kids accidentally use them at level 2 + argument = hedy.process_characters_needing_escape(argument) + + # final argument and punctuation arguments do not have to be separated with a space, other do + if i == len(args) - 1 or args[i + 1] in self.punctuation_symbols: + space = '' + else: + space = " " + + argument_string += argument + space + + i = i + 1 + + return self.keywords["print"] + " " + argument_string + + def punctuation(self, args): + return ''.join([str(c) for c in args]) + + def var(self, args): + var = args[0] + all_parameters = ["'" + hedy.process_characters_needing_escape(a) + "'" for a in args[1:]] + return var + ''.join(all_parameters) + + def ask(self, args): + var = args[0] + all_parameters = [hedy.process_characters_needing_escape(a) for a in args] + + return all_parameters[0] + " " + self.keywords["is"] + " " + self.keywords["ask"] + " " + ''.join(all_parameters[1:]) + + def ask_dep_2(self, args): + return self.keywords["ask"] + " " + ''.join([str(c) for c in args]) + + def echo_dep_2(self, args): + return self.keywords["echo"] + " " + ''.join([str(c) for c in args])
{"golden_diff": "diff --git a/hedy_translation.py b/hedy_translation.py\n--- a/hedy_translation.py\n+++ b/hedy_translation.py\n@@ -1,5 +1,5 @@\n from lark import Transformer, Tree\n-from hedy import get_keywords_for_language, ExtractAST, get_parser\n+import hedy\n \n \n TRANSPILER_LOOKUP = {}\n@@ -8,9 +8,9 @@\n def keywords_to_dict(to_lang=\"nl\"):\n \"\"\"\"Return a dictionary of keywords from language of choice. Key is english value is lang of choice\"\"\"\n keywords = {}\n- keywords_from = get_keywords_for_language(\"en\").replace(\"\\n\\n\", \"\\n\").splitlines()\n+ keywords_from = hedy.get_keywords_for_language(\"en\").replace(\"\\n\\n\", \"\\n\").splitlines()\n \n- keywords_to = get_keywords_for_language(to_lang).replace(\"\\n\\n\", \"\\n\").splitlines()\n+ keywords_to = hedy.get_keywords_for_language(to_lang).replace(\"\\n\\n\", \"\\n\").splitlines()\n keywords_from_withoutlvl = []\n for line in keywords_from:\n if line[0] != '/':\n@@ -29,13 +29,13 @@\n \n def translate_keywords(input_string, from_lang=\"nl\", to_lang=\"nl\", level=1):\n \"\"\"\"Return code with keywords translated to language of choice in level of choice\"\"\"\n- parser = get_parser(level, from_lang)\n+ parser = hedy.get_parser(level, from_lang)\n \n punctuation_symbols = ['!', '?', '.']\n \n keywordDict = keywords_to_dict(to_lang)\n program_root = parser.parse(input_string + '\\n').children[0]\n- abstract_syntaxtree = ExtractAST().transform(program_root)\n+ abstract_syntaxtree = hedy.ExtractAST().transform(program_root)\n translator = TRANSPILER_LOOKUP[level]\n abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root)\n \n@@ -97,3 +97,50 @@\n \n def __default__(self, data, children, meta):\n return Tree(data, children, meta)\n+\n+@hedy_translator(level=2)\n+class ConvertToLang2(ConvertToLang1):\n+\n+ def assign(self, args):\n+ return args[0] + \" \" + self.keywords[\"is\"] + \" \" + ''.join([str(c) for c in args[1:]])\n+\n+ def print(self, args):\n+\n+ argument_string = \"\"\n+ i = 0\n+\n+ for argument in args:\n+ # escape quotes if kids accidentally use them at level 2\n+ argument = hedy.process_characters_needing_escape(argument)\n+\n+ # final argument and punctuation arguments do not have to be separated with a space, other do\n+ if i == len(args) - 1 or args[i + 1] in self.punctuation_symbols:\n+ space = ''\n+ else:\n+ space = \" \"\n+\n+ argument_string += argument + space\n+\n+ i = i + 1\n+\n+ return self.keywords[\"print\"] + \" \" + argument_string\n+\n+ def punctuation(self, args):\n+ return ''.join([str(c) for c in args])\n+\n+ def var(self, args):\n+ var = args[0]\n+ all_parameters = [\"'\" + hedy.process_characters_needing_escape(a) + \"'\" for a in args[1:]]\n+ return var + ''.join(all_parameters)\n+\n+ def ask(self, args):\n+ var = args[0]\n+ all_parameters = [hedy.process_characters_needing_escape(a) for a in args]\n+\n+ return all_parameters[0] + \" \" + self.keywords[\"is\"] + \" \" + self.keywords[\"ask\"] + \" \" + ''.join(all_parameters[1:])\n+\n+ def ask_dep_2(self, args):\n+ return self.keywords[\"ask\"] + \" \" + ''.join([str(c) for c in args])\n+\n+ def echo_dep_2(self, args):\n+ return self.keywords[\"echo\"] + \" \" + ''.join([str(c) for c in args])\n", "issue": "Add level 2 to hedy_translation.py\nNow that the keywords from level 1 can be translated, new levels can be added.\n", "before_files": [{"content": "from lark import Transformer, Tree\nfrom hedy import get_keywords_for_language, ExtractAST, get_parser\n\n\nTRANSPILER_LOOKUP = {}\n\n\ndef 
keywords_to_dict(to_lang=\"nl\"):\n \"\"\"\"Return a dictionary of keywords from language of choice. Key is english value is lang of choice\"\"\"\n keywords = {}\n keywords_from = get_keywords_for_language(\"en\").replace(\"\\n\\n\", \"\\n\").splitlines()\n\n keywords_to = get_keywords_for_language(to_lang).replace(\"\\n\\n\", \"\\n\").splitlines()\n keywords_from_withoutlvl = []\n for line in keywords_from:\n if line[0] != '/':\n keywords_from_withoutlvl.append(line)\n\n keywords_to_withoutlvl = []\n for line in keywords_to:\n if line[0] != '/':\n keywords_to_withoutlvl.append(line)\n\n for line in range(len(keywords_from_withoutlvl)):\n keywords[(keywords_from_withoutlvl[line].split('\"'))[1]] = keywords_to_withoutlvl[line].split('\"')[1]\n\n return keywords\n\n\ndef translate_keywords(input_string, from_lang=\"nl\", to_lang=\"nl\", level=1):\n \"\"\"\"Return code with keywords translated to language of choice in level of choice\"\"\"\n parser = get_parser(level, from_lang)\n\n punctuation_symbols = ['!', '?', '.']\n\n keywordDict = keywords_to_dict(to_lang)\n program_root = parser.parse(input_string + '\\n').children[0]\n abstract_syntaxtree = ExtractAST().transform(program_root)\n translator = TRANSPILER_LOOKUP[level]\n abstract_syntaxtree = translator(keywordDict, punctuation_symbols).transform(program_root)\n\n return abstract_syntaxtree\n\n\ndef hedy_translator(level):\n def decorating(c):\n TRANSPILER_LOOKUP[level] = c\n c.level = level\n return c\n\n return decorating\n\n\n@hedy_translator(level=1)\nclass ConvertToLang1(Transformer):\n\n def __init__(self, keywords, punctuation_symbols):\n self.keywords = keywords\n self.punctuation_symbols = punctuation_symbols\n __class__.level = 1\n\n def command(self, args):\n return args[0]\n\n def program(self, args):\n return '\\n'.join([str(c) for c in args])\n\n def text(self, args):\n return ''.join([str(c) for c in args])\n\n def invalid_space(self, args):\n return \" \" + ''.join([str(c) for c in args])\n\n def print(self, args):\n return self.keywords[\"print\"] + \" \" + \"\".join([str(c) for c in args])\n\n def echo(self, args):\n all_args = self.keywords[\"echo\"]\n if args:\n all_args += \" \"\n return all_args + \"\".join([str(c) for c in args])\n\n def ask(self, args):\n return self.keywords[\"ask\"] + \" \" + \"\".join([str(c) for c in args])\n\n def turn(self, args):\n return self.keywords[\"turn\"] + \" \" + \"\".join([str(c) for c in args])\n\n def forward(self, args):\n return self.keywords[\"forward\"] + \" \" + \"\".join([str(c) for c in args])\n\n def random(self, args):\n return self.keywords[\"random\"] + \"\".join([str(c) for c in args])\n\n def invalid(self, args):\n return ' '.join([str(c) for c in args])\n\n def __default__(self, data, children, meta):\n return Tree(data, children, meta)\n", "path": "hedy_translation.py"}]}
num_tokens_prompt: 1,519
num_tokens_diff: 887
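Level 2 in the Hedy record above re-joins `print` arguments with spaces except before punctuation; that one rule in isolation (a toy sketch, not Hedy's Lark transformer):

```python
PUNCTUATION = {"!", "?", "."}

def join_print_args(args, keyword="print"):
    # Separate arguments with spaces, but glue punctuation to the previous word.
    parts = []
    for i, arg in enumerate(args):
        last = i == len(args) - 1
        glue = last or args[i + 1] in PUNCTUATION
        parts.append(arg + ("" if glue else " "))
    return keyword + " " + "".join(parts)

print(join_print_args(["hallo", "wereld", "!"]))  # print hallo wereld!
```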
problem_id: gh_patches_debug_715
source: rasdani/github-patches
task_type: git_diff
in_source_id: xorbitsai__inference-1092
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DOC: Add doc about installing from source code Note that the issue tracker is NOT the place for general support. </issue> <code> [start of doc/source/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 # 13 import os 14 # import sys 15 # sys.path.insert(0, os.path.abspath('.')) 16 17 18 # -- Project information ----------------------------------------------------- 19 20 project = 'Xinference' 21 copyright = '2023, Xorbits Inc.' 22 author = 'xorbitsai' 23 24 25 # -- General configuration --------------------------------------------------- 26 27 # Add any Sphinx extension module names here, as strings. They can be 28 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 29 # ones. 30 extensions = [ 31 "sphinx.ext.mathjax", 32 "sphinx.ext.ifconfig", 33 "sphinx.ext.intersphinx", 34 "sphinx.ext.viewcode", 35 "sphinx.ext.githubpages", 36 "sphinx.ext.autosummary", 37 "sphinx.ext.napoleon", 38 "sphinx_tabs.tabs", 39 "sphinx_design", 40 "IPython.sphinxext.ipython_directive", 41 "IPython.sphinxext.ipython_console_highlighting", 42 ] 43 44 # Add any paths that contain templates here, relative to this directory. 45 templates_path = ['_templates'] 46 47 # List of patterns, relative to source directory, that match files and 48 # directories to ignore when looking for source files. 49 # This pattern also affects html_static_path and html_extra_path. 50 exclude_patterns = [] 51 52 # i18n 53 locale_dirs = ["locale/"] # path is example but recommended. 54 gettext_compact = False # optional 55 56 57 # -- Options for HTML output ------------------------------------------------- 58 59 # The theme to use for HTML and HTML Help pages. See the documentation for 60 # a list of builtin themes. 61 # 62 html_theme = 'pydata_sphinx_theme' 63 html_title = "Xinference" 64 65 # Add any paths that contain custom static files (such as style sheets) here, 66 # relative to this directory. They are copied after the builtin static files, 67 # so a file named "default.css" will overwrite the builtin "default.css". 68 html_static_path = ['_static'] 69 70 # Define the json_url for our version switcher. 
71 version_match = os.environ.get("READTHEDOCS_LANGUAGE") 72 json_url = "https://inference.readthedocs.io/en/latest/_static/switcher.json" 73 if not version_match: 74 version_match = 'en' 75 76 html_theme_options = { 77 "show_toc_level": 2, 78 "header_links_before_dropdown": 6, 79 "icon_links": [ 80 { 81 "name": "GitHub", 82 "url": "https://github.com/xorbitsai/inference", 83 "icon": "fa-brands fa-github", 84 "type": "fontawesome", 85 }, 86 ], 87 "navbar_align": "content", # [left, content, right] For testing that the navbar items align properly 88 "navbar_start": ["navbar-logo", "version-switcher"], 89 "navbar_center": ["navbar-nav"], 90 "switcher": { 91 "json_url": json_url, 92 "version_match": version_match, 93 }, 94 } 95 96 97 if version_match != 'zh-cn': 98 html_theme_options['icon_links'].extend([{ 99 "name": "Slack", 100 "url": "https://join.slack.com/t/xorbitsio/shared_invite/zt-1o3z9ucdh-RbfhbPVpx7prOVdM1CAuxg", 101 "icon": "fa-brands fa-slack", 102 "type": "fontawesome", 103 }, 104 { 105 "name": "Twitter", 106 "url": "https://twitter.com/xorbitsio", 107 "icon": "fa-brands fa-twitter", 108 "type": "fontawesome", 109 }]) 110 else: 111 html_theme_options['icon_links'].extend([{ 112 "name": "WeChat", 113 "url": "https://xorbits.cn/assets/images/wechat_work_qr.png", 114 "icon": "fa-brands fa-weixin", 115 "type": "fontawesome", 116 }, 117 { 118 "name": "Zhihu", 119 "url": "https://zhihu.com/org/xorbits", 120 "icon": "fa-brands fa-zhihu", 121 "type": "fontawesome", 122 }]) 123 html_theme_options["external_links"] = [ 124 {"name": "产品官网", "url": "https://xorbits.cn/inference"}, 125 ] 126 127 html_favicon = "_static/favicon.svg" 128 [end of doc/source/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -75,7 +75,7 @@ html_theme_options = { "show_toc_level": 2, - "header_links_before_dropdown": 6, + "header_links_before_dropdown": 7, "icon_links": [ { "name": "GitHub",
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -75,7 +75,7 @@\n \n html_theme_options = {\n \"show_toc_level\": 2,\n- \"header_links_before_dropdown\": 6,\n+ \"header_links_before_dropdown\": 7,\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n", "issue": "DOC: Add doc about installing from source code\nNote that the issue tracker is NOT the place for general support.\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Xinference'\ncopyright = '2023, Xorbits Inc.'\nauthor = 'xorbitsai'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"sphinx_tabs.tabs\",\n \"sphinx_design\",\n \"IPython.sphinxext.ipython_directive\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# i18n\nlocale_dirs = [\"locale/\"] # path is example but recommended.\ngettext_compact = False # optional\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_title = \"Xinference\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Define the json_url for our version switcher.\nversion_match = os.environ.get(\"READTHEDOCS_LANGUAGE\")\njson_url = \"https://inference.readthedocs.io/en/latest/_static/switcher.json\"\nif not version_match:\n version_match = 'en'\n\nhtml_theme_options = {\n \"show_toc_level\": 2,\n \"header_links_before_dropdown\": 6,\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/xorbitsai/inference\",\n \"icon\": \"fa-brands fa-github\",\n \"type\": \"fontawesome\",\n },\n ],\n \"navbar_align\": \"content\", # [left, content, right] For testing that the navbar items align properly\n \"navbar_start\": [\"navbar-logo\", \"version-switcher\"],\n \"navbar_center\": [\"navbar-nav\"],\n \"switcher\": {\n \"json_url\": json_url,\n \"version_match\": version_match,\n },\n}\n\n\nif version_match != 'zh-cn':\n html_theme_options['icon_links'].extend([{\n \"name\": \"Slack\",\n \"url\": \"https://join.slack.com/t/xorbitsio/shared_invite/zt-1o3z9ucdh-RbfhbPVpx7prOVdM1CAuxg\",\n \"icon\": \"fa-brands fa-slack\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Twitter\",\n \"url\": \"https://twitter.com/xorbitsio\",\n \"icon\": \"fa-brands fa-twitter\",\n \"type\": \"fontawesome\",\n }])\nelse:\n html_theme_options['icon_links'].extend([{\n \"name\": \"WeChat\",\n \"url\": \"https://xorbits.cn/assets/images/wechat_work_qr.png\",\n \"icon\": \"fa-brands fa-weixin\",\n \"type\": \"fontawesome\",\n },\n {\n \"name\": \"Zhihu\",\n \"url\": \"https://zhihu.com/org/xorbits\",\n \"icon\": \"fa-brands fa-zhihu\",\n \"type\": \"fontawesome\",\n }])\n html_theme_options[\"external_links\"] = [\n {\"name\": \"\u4ea7\u54c1\u5b98\u7f51\", \"url\": \"https://xorbits.cn/inference\"},\n ]\n\nhtml_favicon = \"_static/favicon.svg\"\n", "path": "doc/source/conf.py"}]}
num_tokens_prompt: 1,863
num_tokens_diff: 94
problem_id: gh_patches_debug_17951
source: rasdani/github-patches
task_type: git_diff
in_source_id: wemake-services__wemake-python-styleguide-243
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Forbid to compare the identical variables # Rule request <!-- Thanks for coming up with a proposal! --> ## Thesis We need to find cases like this: `if some_value == some_value:`. We also need to find other operators: `is`, `is not`, `>`, `<`, `!=`, `>=`, `<=`. ## Reasoning It clearly does not make any sense. It is a bug in the source code. Related: https://help.semmle.com/wiki/display/PYTHON/Comparison+of+identical+values </issue> <code> [start of wemake_python_styleguide/visitors/ast/comparisons.py] 1 # -*- coding: utf-8 -*- 2 3 import ast 4 from typing import ClassVar, Sequence 5 6 from wemake_python_styleguide.types import AnyNodes 7 from wemake_python_styleguide.violations.consistency import ( 8 ComparisonOrderViolation, 9 ConstantComparisonViolation, 10 MultipleInComparisonViolation, 11 ) 12 from wemake_python_styleguide.visitors.base import BaseNodeVisitor 13 14 15 class ConstantComparisonVisitor(BaseNodeVisitor): 16 """Restricts the comparison of literals.""" 17 18 def _check_is_literal(self, node: ast.AST) -> bool: 19 """ 20 Checks for nodes that contains only constants. 21 22 If the node contains only literals it will be evaluted. 23 When node relies on some other names, it won't be evaluted. 24 """ 25 try: 26 ast.literal_eval(node) 27 except ValueError: 28 return False 29 else: 30 return True 31 32 def _check_literal_compare(self, node: ast.Compare) -> None: 33 last_was_literal = self._check_is_literal(node.left) 34 for comparator in node.comparators: 35 next_is_literal = self._check_is_literal(comparator) 36 if last_was_literal and next_is_literal: 37 self.add_violation(ConstantComparisonViolation(node)) 38 break 39 last_was_literal = next_is_literal 40 41 def visit_Compare(self, node: ast.Compare) -> None: 42 """ 43 Ensures that compares are written correctly. 44 45 Raises: 46 ConstantComparisonViolation 47 48 """ 49 self._check_literal_compare(node) 50 self.generic_visit(node) 51 52 53 class WrongOrderVisitor(BaseNodeVisitor): 54 """Restricts comparision where argument doesn't come first.""" 55 56 _allowed_left_nodes: ClassVar[AnyNodes] = ( 57 ast.Name, 58 ast.Call, 59 ast.Attribute, 60 ) 61 62 _special_cases: ClassVar[AnyNodes] = ( 63 ast.In, 64 ast.NotIn, 65 ) 66 67 def _is_special_case(self, node: ast.Compare) -> bool: 68 """ 69 Operators ``in`` and ``not in`` are special cases. 70 71 Why? Because it is perfectly fine to use something like: 72 ``if 'key' in some_dict: ...`` 73 This should not be an issue. 74 75 When there are multiple special operators it is still a separate issue. 
76 """ 77 return isinstance(node.ops[0], self._special_cases) 78 79 def _is_left_node_valid(self, left: ast.AST) -> bool: 80 if isinstance(left, self._allowed_left_nodes): 81 return True 82 if isinstance(left, ast.BinOp): 83 return ( 84 self._is_left_node_valid(left.left) or 85 self._is_left_node_valid(left.right) 86 ) 87 return False 88 89 def _has_wrong_nodes_on_the_right( 90 self, 91 comparators: Sequence[ast.AST], 92 ) -> bool: 93 for right in comparators: 94 if isinstance(right, self._allowed_left_nodes): 95 return True 96 if isinstance(right, ast.BinOp): 97 return self._has_wrong_nodes_on_the_right([ 98 right.left, right.right, 99 ]) 100 return False 101 102 def _check_ordering(self, node: ast.Compare) -> None: 103 if self._is_left_node_valid(node.left): 104 return 105 106 if self._is_special_case(node): 107 return 108 109 if len(node.comparators) > 1: 110 return 111 112 if not self._has_wrong_nodes_on_the_right(node.comparators): 113 return 114 115 self.add_violation(ComparisonOrderViolation(node)) 116 117 def visit_Compare(self, node: ast.Compare) -> None: 118 """ 119 Forbids comparision where argument doesn't come first. 120 121 Raises: 122 ComparisonOrderViolation 123 124 """ 125 self._check_ordering(node) 126 self.generic_visit(node) 127 128 129 # TODO(@sobolevn): refactor to be a single visitor 130 class MultipleInVisitor(BaseNodeVisitor): 131 """Restricts comparision where multiple `in`s are used.""" 132 133 def _has_multiple_in_comparisons(self, node: ast.Compare) -> bool: 134 count = 0 135 for op in node.ops: 136 if isinstance(op, ast.In): 137 count += 1 138 return count > 1 139 140 def _count_in_comparisons(self, node: ast.Compare) -> None: 141 if self._has_multiple_in_comparisons(node): 142 self.add_violation(MultipleInComparisonViolation(node)) 143 144 def visit_Compare(self, node: ast.Compare) -> None: 145 """ 146 Forbids comparisons including multiple 'in's in a statement. 147 148 Raise: 149 MultipleInComparisonViolation 150 151 """ 152 self._count_in_comparisons(node) 153 self.generic_visit(node) 154 [end of wemake_python_styleguide/visitors/ast/comparisons.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wemake_python_styleguide/visitors/ast/comparisons.py b/wemake_python_styleguide/visitors/ast/comparisons.py --- a/wemake_python_styleguide/visitors/ast/comparisons.py +++ b/wemake_python_styleguide/visitors/ast/comparisons.py @@ -151,3 +151,32 @@ """ self._count_in_comparisons(node) self.generic_visit(node) + + +class RedundantComparisonVisitor(BaseNodeVisitor): + """Restricts the comparison where always same result.""" + + def visit_Compare(self, node: ast.Compare) -> None: + """ + Ensures that compares are not for same variable. + + Raises: + ConstantComparisonViolation + + """ + self._check_redundant_compare(node) + self.generic_visit(node) + + def _is_same_variable(self, left: ast.AST, right: ast.AST) -> bool: + if isinstance(left, ast.Name) and isinstance(right, ast.Name): + if left.id is right.id: + return True + return False + + def _check_redundant_compare(self, node: ast.Compare) -> None: + last_variable = node.left + for next_variable in node.comparators: + if self._is_same_variable(last_variable, next_variable): + self.add_violation(ConstantComparisonViolation(node)) + break + last_variable = next_variable
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/comparisons.py b/wemake_python_styleguide/visitors/ast/comparisons.py\n--- a/wemake_python_styleguide/visitors/ast/comparisons.py\n+++ b/wemake_python_styleguide/visitors/ast/comparisons.py\n@@ -151,3 +151,32 @@\n \"\"\"\n self._count_in_comparisons(node)\n self.generic_visit(node)\n+\n+\n+class RedundantComparisonVisitor(BaseNodeVisitor):\n+ \"\"\"Restricts the comparison where always same result.\"\"\"\n+\n+ def visit_Compare(self, node: ast.Compare) -> None:\n+ \"\"\"\n+ Ensures that compares are not for same variable.\n+\n+ Raises:\n+ ConstantComparisonViolation\n+\n+ \"\"\"\n+ self._check_redundant_compare(node)\n+ self.generic_visit(node)\n+\n+ def _is_same_variable(self, left: ast.AST, right: ast.AST) -> bool:\n+ if isinstance(left, ast.Name) and isinstance(right, ast.Name):\n+ if left.id is right.id:\n+ return True\n+ return False\n+\n+ def _check_redundant_compare(self, node: ast.Compare) -> None:\n+ last_variable = node.left\n+ for next_variable in node.comparators:\n+ if self._is_same_variable(last_variable, next_variable):\n+ self.add_violation(ConstantComparisonViolation(node))\n+ break\n+ last_variable = next_variable\n", "issue": "Forbid to compare the identical variables\n# Rule request\r\n\r\n<!-- Thanks for coming up with a proposal! -->\r\n\r\n## Thesis\r\n\r\nWe need to find cases like this: `if some_value == some_value:`.\r\nWe also need to find other operators: `is`, `is not`, `>`, `<`, `!=`, `>=`, `<=`.\r\n\r\n## Reasoning\r\n\r\nIt clearly does not make any sense. It is a bug in the source code.\r\n\r\nRelated: https://help.semmle.com/wiki/display/PYTHON/Comparison+of+identical+values\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Sequence\n\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations.consistency import (\n ComparisonOrderViolation,\n ConstantComparisonViolation,\n MultipleInComparisonViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass ConstantComparisonVisitor(BaseNodeVisitor):\n \"\"\"Restricts the comparison of literals.\"\"\"\n\n def _check_is_literal(self, node: ast.AST) -> bool:\n \"\"\"\n Checks for nodes that contains only constants.\n\n If the node contains only literals it will be evaluted.\n When node relies on some other names, it won't be evaluted.\n \"\"\"\n try:\n ast.literal_eval(node)\n except ValueError:\n return False\n else:\n return True\n\n def _check_literal_compare(self, node: ast.Compare) -> None:\n last_was_literal = self._check_is_literal(node.left)\n for comparator in node.comparators:\n next_is_literal = self._check_is_literal(comparator)\n if last_was_literal and next_is_literal:\n self.add_violation(ConstantComparisonViolation(node))\n break\n last_was_literal = next_is_literal\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Ensures that compares are written correctly.\n\n Raises:\n ConstantComparisonViolation\n\n \"\"\"\n self._check_literal_compare(node)\n self.generic_visit(node)\n\n\nclass WrongOrderVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where argument doesn't come first.\"\"\"\n\n _allowed_left_nodes: ClassVar[AnyNodes] = (\n ast.Name,\n ast.Call,\n ast.Attribute,\n )\n\n _special_cases: ClassVar[AnyNodes] = (\n ast.In,\n ast.NotIn,\n )\n\n def _is_special_case(self, node: ast.Compare) -> bool:\n \"\"\"\n Operators ``in`` and ``not in`` are special cases.\n\n Why? 
Because it is perfectly fine to use something like:\n ``if 'key' in some_dict: ...``\n This should not be an issue.\n\n When there are multiple special operators it is still a separate issue.\n \"\"\"\n return isinstance(node.ops[0], self._special_cases)\n\n def _is_left_node_valid(self, left: ast.AST) -> bool:\n if isinstance(left, self._allowed_left_nodes):\n return True\n if isinstance(left, ast.BinOp):\n return (\n self._is_left_node_valid(left.left) or\n self._is_left_node_valid(left.right)\n )\n return False\n\n def _has_wrong_nodes_on_the_right(\n self,\n comparators: Sequence[ast.AST],\n ) -> bool:\n for right in comparators:\n if isinstance(right, self._allowed_left_nodes):\n return True\n if isinstance(right, ast.BinOp):\n return self._has_wrong_nodes_on_the_right([\n right.left, right.right,\n ])\n return False\n\n def _check_ordering(self, node: ast.Compare) -> None:\n if self._is_left_node_valid(node.left):\n return\n\n if self._is_special_case(node):\n return\n\n if len(node.comparators) > 1:\n return\n\n if not self._has_wrong_nodes_on_the_right(node.comparators):\n return\n\n self.add_violation(ComparisonOrderViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparision where argument doesn't come first.\n\n Raises:\n ComparisonOrderViolation\n\n \"\"\"\n self._check_ordering(node)\n self.generic_visit(node)\n\n\n# TODO(@sobolevn): refactor to be a single visitor\nclass MultipleInVisitor(BaseNodeVisitor):\n \"\"\"Restricts comparision where multiple `in`s are used.\"\"\"\n\n def _has_multiple_in_comparisons(self, node: ast.Compare) -> bool:\n count = 0\n for op in node.ops:\n if isinstance(op, ast.In):\n count += 1\n return count > 1\n\n def _count_in_comparisons(self, node: ast.Compare) -> None:\n if self._has_multiple_in_comparisons(node):\n self.add_violation(MultipleInComparisonViolation(node))\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Forbids comparisons including multiple 'in's in a statement.\n\n Raise:\n MultipleInComparisonViolation\n\n \"\"\"\n self._count_in_comparisons(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/comparisons.py"}]}
2,029
335
gh_patches_debug_15473
rasdani/github-patches
git_diff
matrix-org__synapse-5354
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement rooms v5 Synapse v1.0 will default to rooms v4, however the intention is that it also ships with rooms v5 which is envisaged to be rooms v4 + https://github.com/matrix-org/synapse/issues/4364 rooms v5 is yet to be MSC'd so does not formally exist at this point, this issue is a place holder. </issue> <code> [start of synapse/api/room_versions.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2019 New Vector Ltd 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 import attr 16 17 18 class EventFormatVersions(object): 19 """This is an internal enum for tracking the version of the event format, 20 independently from the room version. 21 """ 22 V1 = 1 # $id:server event id format 23 V2 = 2 # MSC1659-style $hash event id format: introduced for room v3 24 V3 = 3 # MSC1884-style $hash format: introduced for room v4 25 26 27 KNOWN_EVENT_FORMAT_VERSIONS = { 28 EventFormatVersions.V1, 29 EventFormatVersions.V2, 30 EventFormatVersions.V3, 31 } 32 33 34 class StateResolutionVersions(object): 35 """Enum to identify the state resolution algorithms""" 36 V1 = 1 # room v1 state res 37 V2 = 2 # MSC1442 state res: room v2 and later 38 39 40 class RoomDisposition(object): 41 STABLE = "stable" 42 UNSTABLE = "unstable" 43 44 45 @attr.s(slots=True, frozen=True) 46 class RoomVersion(object): 47 """An object which describes the unique attributes of a room version.""" 48 49 identifier = attr.ib() # str; the identifier for this version 50 disposition = attr.ib() # str; one of the RoomDispositions 51 event_format = attr.ib() # int; one of the EventFormatVersions 52 state_res = attr.ib() # int; one of the StateResolutionVersions 53 enforce_key_validity = attr.ib() # bool 54 55 56 class RoomVersions(object): 57 V1 = RoomVersion( 58 "1", 59 RoomDisposition.STABLE, 60 EventFormatVersions.V1, 61 StateResolutionVersions.V1, 62 enforce_key_validity=False, 63 ) 64 V2 = RoomVersion( 65 "2", 66 RoomDisposition.STABLE, 67 EventFormatVersions.V1, 68 StateResolutionVersions.V2, 69 enforce_key_validity=False, 70 ) 71 V3 = RoomVersion( 72 "3", 73 RoomDisposition.STABLE, 74 EventFormatVersions.V2, 75 StateResolutionVersions.V2, 76 enforce_key_validity=False, 77 ) 78 V4 = RoomVersion( 79 "4", 80 RoomDisposition.STABLE, 81 EventFormatVersions.V3, 82 StateResolutionVersions.V2, 83 enforce_key_validity=False, 84 ) 85 VDH_TEST_KEY_VALIDITY = RoomVersion( 86 "vdh-test-key-validity", 87 RoomDisposition.UNSTABLE, 88 EventFormatVersions.V3, 89 StateResolutionVersions.V2, 90 enforce_key_validity=False, 91 ) 92 93 94 KNOWN_ROOM_VERSIONS = { 95 v.identifier: v for v in ( 96 RoomVersions.V1, 97 RoomVersions.V2, 98 RoomVersions.V3, 99 RoomVersions.V4, 100 RoomVersions.VDH_TEST_KEY_VALIDITY, 101 ) 102 } # type: dict[str, RoomVersion] 103 [end of synapse/api/room_versions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -82,12 +82,12 @@ StateResolutionVersions.V2, enforce_key_validity=False, ) - VDH_TEST_KEY_VALIDITY = RoomVersion( - "vdh-test-key-validity", - RoomDisposition.UNSTABLE, + V5 = RoomVersion( + "5", + RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, - enforce_key_validity=False, + enforce_key_validity=True, ) @@ -97,6 +97,6 @@ RoomVersions.V2, RoomVersions.V3, RoomVersions.V4, - RoomVersions.VDH_TEST_KEY_VALIDITY, + RoomVersions.V5, ) } # type: dict[str, RoomVersion]
{"golden_diff": "diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py\n--- a/synapse/api/room_versions.py\n+++ b/synapse/api/room_versions.py\n@@ -82,12 +82,12 @@\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n- VDH_TEST_KEY_VALIDITY = RoomVersion(\n- \"vdh-test-key-validity\",\n- RoomDisposition.UNSTABLE,\n+ V5 = RoomVersion(\n+ \"5\",\n+ RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n- enforce_key_validity=False,\n+ enforce_key_validity=True,\n )\n \n \n@@ -97,6 +97,6 @@\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n- RoomVersions.VDH_TEST_KEY_VALIDITY,\n+ RoomVersions.V5,\n )\n } # type: dict[str, RoomVersion]\n", "issue": "Implement rooms v5\nSynapse v1.0 will default to rooms v4, however the intention is that it also ships with rooms v5 which is envisaged to be rooms v4 + https://github.com/matrix-org/synapse/issues/4364\r\n\r\nrooms v5 is yet to be MSC'd so does not formally exist at this point, this issue is a place holder.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport attr\n\n\nclass EventFormatVersions(object):\n \"\"\"This is an internal enum for tracking the version of the event format,\n independently from the room version.\n \"\"\"\n V1 = 1 # $id:server event id format\n V2 = 2 # MSC1659-style $hash event id format: introduced for room v3\n V3 = 3 # MSC1884-style $hash format: introduced for room v4\n\n\nKNOWN_EVENT_FORMAT_VERSIONS = {\n EventFormatVersions.V1,\n EventFormatVersions.V2,\n EventFormatVersions.V3,\n}\n\n\nclass StateResolutionVersions(object):\n \"\"\"Enum to identify the state resolution algorithms\"\"\"\n V1 = 1 # room v1 state res\n V2 = 2 # MSC1442 state res: room v2 and later\n\n\nclass RoomDisposition(object):\n STABLE = \"stable\"\n UNSTABLE = \"unstable\"\n\n\[email protected](slots=True, frozen=True)\nclass RoomVersion(object):\n \"\"\"An object which describes the unique attributes of a room version.\"\"\"\n\n identifier = attr.ib() # str; the identifier for this version\n disposition = attr.ib() # str; one of the RoomDispositions\n event_format = attr.ib() # int; one of the EventFormatVersions\n state_res = attr.ib() # int; one of the StateResolutionVersions\n enforce_key_validity = attr.ib() # bool\n\n\nclass RoomVersions(object):\n V1 = RoomVersion(\n \"1\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V1,\n enforce_key_validity=False,\n )\n V2 = RoomVersion(\n \"2\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V3 = RoomVersion(\n \"3\",\n RoomDisposition.STABLE,\n EventFormatVersions.V2,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n V4 = RoomVersion(\n \"4\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n VDH_TEST_KEY_VALIDITY = RoomVersion(\n \"vdh-test-key-validity\",\n 
RoomDisposition.UNSTABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n )\n\n\nKNOWN_ROOM_VERSIONS = {\n v.identifier: v for v in (\n RoomVersions.V1,\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n RoomVersions.VDH_TEST_KEY_VALIDITY,\n )\n} # type: dict[str, RoomVersion]\n", "path": "synapse/api/room_versions.py"}]}
1,569
219
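For readers skimming this record, the shape of the fix is a registry of immutable room-version descriptors keyed by identifier, extended with a stable "5" entry that turns key-validity enforcement on. The sketch below reproduces that pattern generically with a standard-library dataclass; it is an illustration only, not Synapse code — the class, field, and function names here are invented, and the real semantics live in `synapse/api/room_versions.py` as shown in the diff.

```python
from dataclasses import dataclass


@dataclass(frozen=True)
class VersionSpec:
    identifier: str
    stable: bool
    enforce_key_validity: bool


KNOWN_VERSIONS = {
    spec.identifier: spec
    for spec in (
        VersionSpec("4", stable=True, enforce_key_validity=False),
        VersionSpec("5", stable=True, enforce_key_validity=True),
    )
}


def requires_key_validity(version_id):
    spec = KNOWN_VERSIONS.get(version_id)
    if spec is None:
        raise ValueError("unsupported room version: %r" % version_id)
    return spec.enforce_key_validity


print(requires_key_validity("5"))  # True — the behaviour the new v5 entry adds
```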
gh_patches_debug_2853
rasdani/github-patches
git_diff
getsentry__sentry-python-1545
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Typo in setup.py for 1.9.1 release breaks `conda export` ### How do you use Sentry? Self-hosted/on-premise ### Version 1.9.1 ### Steps to Reproduce This line https://github.com/getsentry/sentry-python/commit/b7c0dc412a1505fff382732f567952c8a9572b60#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7R43 `'urllib3>=1.26.11"; python_version >="3.6"',` Has a stray `"` after the urllib3 version number. This passes by pipi but confuses conda, causing a very opaque error message when you try to run `conda export`: ``` InvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['('] ``` Steps to reproduce: ``` conda create -n test-sentry-sdk python=3.8 conda activate test-sentry-sdk pip install sentry-sdk==1.9.1 conda export ``` ### Expected Result Expect to get a conda environment yaml spec. ### Actual Result ``` InvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['('] ``` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 """ 4 Sentry-Python - Sentry SDK for Python 5 ===================================== 6 7 **Sentry-Python is an SDK for Sentry.** Check out `GitHub 8 <https://github.com/getsentry/sentry-python>`_ to find out more. 9 """ 10 11 import os 12 from setuptools import setup, find_packages 13 14 here = os.path.abspath(os.path.dirname(__file__)) 15 16 17 def get_file_text(file_name): 18 with open(os.path.join(here, file_name)) as in_file: 19 return in_file.read() 20 21 22 setup( 23 name="sentry-sdk", 24 version="1.9.1", 25 author="Sentry Team and Contributors", 26 author_email="[email protected]", 27 url="https://github.com/getsentry/sentry-python", 28 project_urls={ 29 "Documentation": "https://docs.sentry.io/platforms/python/", 30 "Changelog": "https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md", 31 }, 32 description="Python client for Sentry (https://sentry.io)", 33 long_description=get_file_text("README.md"), 34 long_description_content_type="text/markdown", 35 packages=find_packages(exclude=("tests", "tests.*")), 36 # PEP 561 37 package_data={"sentry_sdk": ["py.typed"]}, 38 zip_safe=False, 39 license="BSD", 40 install_requires=[ 41 'urllib3>=1.25.7; python_version<="3.4"', 42 'urllib3>=1.26.9; python_version>="3.5"', 43 'urllib3>=1.26.11"; python_version >="3.6"', 44 "certifi", 45 ], 46 extras_require={ 47 "flask": ["flask>=0.11", "blinker>=1.1"], 48 "quart": ["quart>=0.16.1", "blinker>=1.1"], 49 "bottle": ["bottle>=0.12.13"], 50 "falcon": ["falcon>=1.4"], 51 "django": ["django>=1.8"], 52 "sanic": ["sanic>=0.8"], 53 "celery": ["celery>=3"], 54 "beam": ["apache-beam>=2.12"], 55 "rq": ["rq>=0.6"], 56 "aiohttp": ["aiohttp>=3.5"], 57 "tornado": ["tornado>=5"], 58 "sqlalchemy": ["sqlalchemy>=1.2"], 59 "pyspark": ["pyspark>=2.4.4"], 60 "pure_eval": ["pure_eval", "executing", "asttokens"], 61 "chalice": ["chalice>=1.16.0"], 62 "httpx": ["httpx>=0.16.0"], 63 "starlette": ["starlette>=0.19.1"], 64 "fastapi": ["fastapi>=0.79.0"], 65 }, 66 classifiers=[ 67 "Development Status :: 5 - Production/Stable", 68 "Environment :: Web Environment", 69 "Intended Audience :: Developers", 70 "License :: OSI Approved :: BSD License", 71 "Operating System :: OS Independent", 72 "Programming Language :: Python", 73 "Programming Language :: Python :: 2", 74 "Programming Language :: Python :: 2.7", 75 "Programming Language :: Python :: 3", 76 "Programming Language :: Python :: 3.4", 77 
"Programming Language :: Python :: 3.5", 78 "Programming Language :: Python :: 3.6", 79 "Programming Language :: Python :: 3.7", 80 "Programming Language :: Python :: 3.8", 81 "Programming Language :: Python :: 3.9", 82 "Programming Language :: Python :: 3.10", 83 "Topic :: Software Development :: Libraries :: Python Modules", 84 ], 85 options={"bdist_wheel": {"universal": "1"}}, 86 ) 87 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ install_requires=[ 'urllib3>=1.25.7; python_version<="3.4"', 'urllib3>=1.26.9; python_version>="3.5"', - 'urllib3>=1.26.11"; python_version >="3.6"', + 'urllib3>=1.26.11; python_version >="3.6"', "certifi", ], extras_require={
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,7 @@\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n- 'urllib3>=1.26.11\"; python_version >=\"3.6\"',\n+ 'urllib3>=1.26.11; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n", "issue": "Typo in setup.py for 1.9.1 release breaks `conda export`\n### How do you use Sentry?\n\nSelf-hosted/on-premise\n\n### Version\n\n1.9.1\n\n### Steps to Reproduce\n\nThis line \r\nhttps://github.com/getsentry/sentry-python/commit/b7c0dc412a1505fff382732f567952c8a9572b60#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7R43\r\n\r\n`'urllib3>=1.26.11\"; python_version >=\"3.6\"',`\r\n\r\nHas a stray `\"` after the urllib3 version number.\r\n\r\nThis passes by pipi but confuses conda, causing a very opaque error message when you try to run `conda export`:\r\n```\r\nInvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']\r\n```\r\n\r\nSteps to reproduce:\r\n```\r\nconda create -n test-sentry-sdk python=3.8\r\nconda activate test-sentry-sdk\r\npip install sentry-sdk==1.9.1\r\nconda export\r\n```\n\n### Expected Result\n\nExpect to get a conda environment yaml spec.\n\n### Actual Result\n\n```\r\nInvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['(']\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.9.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"BSD\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n 'urllib3>=1.26.11\"; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n },\n classifiers=[\n \"Development 
Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}]}
1,883
136
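The stray `"` in this record is exactly the kind of mistake a PEP 508 parse can catch before release. Below is a small, independent check using the `packaging` library (an assumption on my part: `packaging` is installed; this check is not part of sentry-sdk's actual fix) that accepts the corrected string and rejects the broken one.

```python
from packaging.requirements import InvalidRequirement, Requirement

candidates = [
    'urllib3>=1.26.11; python_version >= "3.6"',   # corrected form
    'urllib3>=1.26.11"; python_version >="3.6"',   # stray quote from the issue
]

for spec in candidates:
    try:
        Requirement(spec)
        print("valid:  ", spec)
    except InvalidRequirement as exc:
        print("invalid:", spec, "-", exc)
```

Conda only tripped over the same string later, at `conda export` time; failing at packaging time keeps the error next to its cause.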
gh_patches_debug_34016
rasdani/github-patches
git_diff
akvo__akvo-rsr-2044
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> File upload for Indicator Period Data requires Django login When uploading a photo/file to the /rest/v1/indicator_period_data/nnn/upload_file/ endpoint, a Django login is required, which is not the case for the rest of the API; an API key should be sufficient. </issue> <code> [start of akvo/rest/views/indicator_period_data.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment 9 10 from ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer, 11 IndicatorPeriodDataCommentSerializer) 12 from ..viewsets import PublicProjectViewSet 13 14 from rest_framework.decorators import api_view, permission_classes 15 from rest_framework.permissions import IsAuthenticated 16 from rest_framework.response import Response 17 18 19 class IndicatorPeriodDataViewSet(PublicProjectViewSet): 20 """ 21 """ 22 queryset = IndicatorPeriodData.objects.all() 23 serializer_class = IndicatorPeriodDataSerializer 24 filter_fields = { 25 'period': ['exact'], 26 'period__indicator': ['exact'], 27 'period__indicator__result': ['exact'], 28 'period__indicator__result__project': ['exact'], 29 'user': ['exact'], 30 'status': ['exact'], 31 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ], 32 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ], 33 'relative_data': ['exact'], 34 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ], 35 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ], 36 'update_method': ['exact'] 37 } 38 39 project_relation = 'period__indicator__result__project__' 40 41 42 class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet): 43 """ 44 """ 45 queryset = IndicatorPeriodData.objects.all() 46 serializer_class = IndicatorPeriodDataFrameworkSerializer 47 filter_fields = { 48 'period': ['exact'], 49 'period__indicator': ['exact'], 50 'period__indicator__result': ['exact'], 51 'period__indicator__result__project': ['exact'], 52 'user': ['exact'], 53 'status': ['exact'], 54 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ], 55 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ], 56 'relative_data': ['exact'], 57 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ], 58 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ], 59 'update_method': ['exact'] 60 } 61 project_relation = 'period__indicator__result__project__' 62 63 64 class IndicatorPeriodDataCommentViewSet(PublicProjectViewSet): 65 """ 66 """ 67 queryset = IndicatorPeriodDataComment.objects.all() 68 serializer_class = IndicatorPeriodDataCommentSerializer 69 filter_fields = { 70 'data': ['exact'], 71 'data__period': ['exact'], 72 'data__period__indicator': ['exact'], 73 'data__period__indicator__result': ['exact'], 74 'data__period__indicator__result__project': ['exact'], 75 'user': ['exact'], 76 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ], 77 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ] 78 } 79 project_relation = 'period__indicator__result__project__' 80 81 82 @api_view(['POST']) 83 @permission_classes((IsAuthenticated, )) 84 def indicator_upload_file(request, pk=None): 85 """ 86 Special API call for directly uploading a file. 
87 88 :param request; A Django request object. 89 :param pk; The primary key of an IndicatorPeriodData instance. 90 """ 91 update = IndicatorPeriodData.objects.get(pk=pk) 92 upload_file = request.FILES['file'] 93 94 # TODO: Permissions 95 # user = request.user 96 97 file_type = request.POST.copy()['type'] 98 if file_type == 'photo': 99 update.photo = upload_file 100 update.save(update_fields=['photo']) 101 return Response({'file': update.photo.url}) 102 elif file_type == 'file': 103 update.file = upload_file 104 update.save(update_fields=['file']) 105 return Response({'file': update.file.url}) 106 107 # TODO: Error response 108 return Response({}) 109 [end of akvo/rest/views/indicator_period_data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py --- a/akvo/rest/views/indicator_period_data.py +++ b/akvo/rest/views/indicator_period_data.py @@ -11,8 +11,10 @@ IndicatorPeriodDataCommentSerializer) from ..viewsets import PublicProjectViewSet +from django.http import HttpResponseForbidden + +from rest_framework import status from rest_framework.decorators import api_view, permission_classes -from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response @@ -80,7 +82,6 @@ @api_view(['POST']) -@permission_classes((IsAuthenticated, )) def indicator_upload_file(request, pk=None): """ Special API call for directly uploading a file. @@ -91,18 +92,24 @@ update = IndicatorPeriodData.objects.get(pk=pk) upload_file = request.FILES['file'] - # TODO: Permissions - # user = request.user - - file_type = request.POST.copy()['type'] - if file_type == 'photo': - update.photo = upload_file - update.save(update_fields=['photo']) - return Response({'file': update.photo.url}) - elif file_type == 'file': - update.file = upload_file - update.save(update_fields=['file']) - return Response({'file': update.file.url}) - - # TODO: Error response - return Response({}) + # Permissions + user = getattr(request, 'user', None) + if not user: + return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN) + + if not user.has_perm('rsr.change_project', update.period.indicator.result.project): + return Response({'error': 'User has no permission to place an update'}, + status=status.HTTP_403_FORBIDDEN) + + try: + file_type = request.POST.copy()['type'] + if file_type == 'photo': + update.photo = upload_file + update.save(update_fields=['photo']) + return Response({'file': update.photo.url}) + elif file_type == 'file': + update.file = upload_file + update.save(update_fields=['file']) + return Response({'file': update.file.url}) + except Exception as e: + return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
{"golden_diff": "diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py\n--- a/akvo/rest/views/indicator_period_data.py\n+++ b/akvo/rest/views/indicator_period_data.py\n@@ -11,8 +11,10 @@\n IndicatorPeriodDataCommentSerializer)\n from ..viewsets import PublicProjectViewSet\n \n+from django.http import HttpResponseForbidden\n+\n+from rest_framework import status\n from rest_framework.decorators import api_view, permission_classes\n-from rest_framework.permissions import IsAuthenticated\n from rest_framework.response import Response\n \n \n@@ -80,7 +82,6 @@\n \n \n @api_view(['POST'])\n-@permission_classes((IsAuthenticated, ))\n def indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n@@ -91,18 +92,24 @@\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.FILES['file']\n \n- # TODO: Permissions\n- # user = request.user\n-\n- file_type = request.POST.copy()['type']\n- if file_type == 'photo':\n- update.photo = upload_file\n- update.save(update_fields=['photo'])\n- return Response({'file': update.photo.url})\n- elif file_type == 'file':\n- update.file = upload_file\n- update.save(update_fields=['file'])\n- return Response({'file': update.file.url})\n-\n- # TODO: Error response\n- return Response({})\n+ # Permissions\n+ user = getattr(request, 'user', None)\n+ if not user:\n+ return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n+\n+ if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n+ return Response({'error': 'User has no permission to place an update'},\n+ status=status.HTTP_403_FORBIDDEN)\n+\n+ try:\n+ file_type = request.POST.copy()['type']\n+ if file_type == 'photo':\n+ update.photo = upload_file\n+ update.save(update_fields=['photo'])\n+ return Response({'file': update.photo.url})\n+ elif file_type == 'file':\n+ update.file = upload_file\n+ update.save(update_fields=['file'])\n+ return Response({'file': update.file.url})\n+ except Exception as e:\n+ return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "issue": "File upload for Indicator Period Data requires Django login\nWhen uploading a photo/file to the /rest/v1/indicator_period_data/nnn/upload_file/ endpoint, a Django login is required, which is not the case for the rest of the API; an API key should be sufficient.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': 
['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n filter_fields = {\n 'period': ['exact'],\n 'period__indicator': ['exact'],\n 'period__indicator__result': ['exact'],\n 'period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'status': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'relative_data': ['exact'],\n 'data': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'period_actual_value': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'update_method': ['exact']\n }\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n filter_fields = {\n 'data': ['exact'],\n 'data__period': ['exact'],\n 'data__period__indicator': ['exact'],\n 'data__period__indicator__result': ['exact'],\n 'data__period__indicator__result__project': ['exact'],\n 'user': ['exact'],\n 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ]\n }\n project_relation = 'period__indicator__result__project__'\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.FILES['file']\n\n # TODO: Permissions\n # user = request.user\n\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n\n # TODO: Error response\n return Response({})\n", "path": "akvo/rest/views/indicator_period_data.py"}]}
1,742
540
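The fix in this record boils down to two moves: stop requiring a Django session via the `IsAuthenticated` decorator, and answer permission problems with explicit 403 responses so token/API-key authentication keeps working. The fragment below sketches that pattern in isolation; it is not Akvo RSR's code, it needs a configured Django/DRF project to actually run, and the permission string and view name are placeholders.

```python
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response


@api_view(['POST'])  # note: no @permission_classes((IsAuthenticated,))
def upload_file(request, pk=None):
    user = getattr(request, 'user', None)
    if user is None or not user.is_authenticated:
        # Rejects anonymous callers but accepts any authenticated request,
        # whether it came from a session login or an API key/token.
        return Response({'error': 'User is not logged in'},
                        status=status.HTTP_403_FORBIDDEN)

    # Explicit permission check; assumes an auth backend that grants the
    # relevant permission ('app.change_thing' is a placeholder name).
    if not user.has_perm('app.change_thing'):
        return Response({'error': 'Insufficient permissions'},
                        status=status.HTTP_403_FORBIDDEN)

    return Response({'ok': True})
```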
gh_patches_debug_896
rasdani/github-patches
git_diff
bridgecrewio__checkov-1228
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> boto3 is fixed at the patch level version **Is your feature request related to a problem? Please describe.** free boto3 dependency patch version. **Describe the solution you'd like** replace the line here: https://github.com/bridgecrewio/checkov/blob/master/Pipfile#L29 with ``` boto3 = "==1.17.*" ``` **Describe alternatives you've considered** there are no alternatives as the patch version i don't see why is locked. it can cause conflicts with already installed boto3 library **Additional context** boto3 dependency install latest patch version By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. Fixes #1211 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import logging 3 import os 4 from importlib import util 5 from os import path 6 7 import setuptools 8 from setuptools import setup 9 10 # read the contents of your README file 11 this_directory = path.abspath(path.dirname(__file__)) 12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f: 13 long_description = f.read() 14 15 logger = logging.getLogger(__name__) 16 spec = util.spec_from_file_location( 17 "checkov.version", os.path.join("checkov", "version.py") 18 ) 19 # noinspection PyUnresolvedReferences 20 mod = util.module_from_spec(spec) 21 spec.loader.exec_module(mod) # type: ignore 22 version = mod.version # type: ignore 23 24 setup( 25 extras_require={ 26 "dev": [ 27 "pytest==5.3.1", 28 "coverage", 29 "coverage-badge", 30 "GitPython==3.1.7", 31 "bandit" 32 ] 33 }, 34 install_requires=[ 35 "bc-python-hcl2>=0.3.18", 36 "cloudsplaining>=0.4.1", 37 "deep_merge", 38 "tabulate", 39 "colorama", 40 "termcolor", 41 "junit-xml", 42 "dpath>=1.5.0,<2", 43 "pyyaml>=5.4.1", 44 "boto3==1.17.27", 45 "GitPython", 46 "six==1.15.0", 47 "jmespath", 48 "tqdm", 49 "update_checker", 50 "semantic_version", 51 "packaging", 52 "networkx", 53 "dockerfile-parse", 54 "docker" 55 ], 56 license="Apache License 2.0", 57 name="checkov", 58 version=version, 59 python_requires=">=3.7", 60 description="Infrastructure as code static analysis", 61 author="bridgecrew", 62 author_email="[email protected]", 63 url="https://github.com/nimrodkor/checkov", 64 packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]), 65 include_package_data=True, 66 package_dir={'checkov.terraform.checks.graph_checks': 'checkov/terraform/checks/graph_checks'}, 67 package_data = {'checkov.terraform.checks.graph_checks': ['aws/*.yaml', 'gcp/*.yaml', 'azure/*.yaml']}, 68 scripts=["bin/checkov", "bin/checkov.cmd"], 69 long_description=long_description, 70 long_description_content_type="text/markdown", 71 classifiers=[ 72 'Environment :: Console', 73 'Intended Audience :: Developers', 74 'Intended Audience :: System Administrators', 75 'Programming Language :: Python :: 3.7', 76 'Programming Language :: Python :: 3.8', 77 'Programming Language :: Python :: 3.9', 78 'Topic :: Security', 79 'Topic :: Software Development :: Build Tools' 80 ] 81 ) 82 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ "junit-xml", "dpath>=1.5.0,<2", "pyyaml>=5.4.1", - "boto3==1.17.27", + "boto3==1.17.*", "GitPython", "six==1.15.0", "jmespath",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,7 @@\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n- \"boto3==1.17.27\",\n+ \"boto3==1.17.*\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n", "issue": "boto3 is fixed at the patch level version\n**Is your feature request related to a problem? Please describe.**\r\nfree boto3 dependency patch version.\r\n\r\n**Describe the solution you'd like**\r\nreplace the line here:\r\nhttps://github.com/bridgecrewio/checkov/blob/master/Pipfile#L29\r\n\r\nwith \r\n```\r\nboto3 = \"==1.17.*\"\r\n```\r\n**Describe alternatives you've considered**\r\nthere are no alternatives as the patch version i don't see why is locked.\r\nit can cause conflicts with already installed boto3 library \r\n\r\n**Additional context**\r\n\r\n\nboto3 dependency install latest patch version\nBy submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.\r\n\r\nFixes #1211 \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\"\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.18\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.27\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n include_package_data=True,\n package_dir={'checkov.terraform.checks.graph_checks': 'checkov/terraform/checks/graph_checks'},\n package_data = {'checkov.terraform.checks.graph_checks': ['aws/*.yaml', 'gcp/*.yaml', 'azure/*.yaml']},\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}]}
1,466
114
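A quick way to see what the requested change buys, independent of checkov itself and using the `packaging` library (an assumption; it is not part of the fix): a PEP 440 wildcard pin `==1.17.*` accepts every 1.17 patch release, while the exact pin `==1.17.27` accepts only one — which is what caused the conflicts the issue describes.

```python
from packaging.specifiers import SpecifierSet

exact = SpecifierSet("==1.17.27")
wildcard = SpecifierSet("==1.17.*")

for version in ("1.17.27", "1.17.112", "1.18.0"):
    print(version, version in exact, version in wildcard)

# 1.17.27  True  True
# 1.17.112 False True
# 1.18.0   False False
```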
gh_patches_debug_6047
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2151
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> testing #1824 wrong wording Dashboard/Newsletter: the Word "Empfänger*innen" is at the wrong place. Please delete it above and replace "Projekt" by "Empfänger*innen sind alle, die diesem Projekt folgen:" </issue> <code> [start of meinberlin/apps/newsletters/forms.py] 1 from django import forms 2 from django.apps import apps 3 from django.conf import settings 4 from django.utils.translation import ugettext_lazy as _ 5 6 from adhocracy4.projects.models import Project 7 8 from . import models 9 10 Organisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL) 11 12 13 class NewsletterForm(forms.ModelForm): 14 class Meta: 15 model = models.Newsletter 16 fields = ['sender_name', 'sender', 'receivers', 'project', 17 'organisation', 'subject', 'body'] 18 19 def __init__(self, user=None, organisation=None, *args, **kwargs): 20 super().__init__(*args, **kwargs) 21 choices = [(value, string) 22 for value, string in models.RECEIVER_CHOICES 23 if value != models.PLATFORM or (user and user.is_superuser)] 24 self.fields['receivers'] = forms.ChoiceField( 25 label=_('Receivers'), 26 choices=choices, 27 widget=forms.RadioSelect(), 28 ) 29 30 project_qs = Project.objects 31 if organisation: 32 project_qs = Project.objects.filter(organisation=organisation.id) 33 34 self.fields['project'] = forms.ModelChoiceField( 35 label=_('Project'), 36 queryset=project_qs, 37 required=False, empty_label=None) 38 39 self.fields['organisation'] = forms.ModelChoiceField( 40 label=_('Organisation'), 41 queryset=Organisation.objects, 42 required=False, empty_label=None) 43 44 def clean(self): 45 cleaned_data = super().clean() 46 if cleaned_data.get('receivers') == str(models.PROJECT) and \ 47 not cleaned_data.get('project'): 48 self.add_error('project', _('Select a Project')) 49 50 51 class RestrictedNewsletterForm(forms.ModelForm): 52 53 class Meta: 54 model = models.Newsletter 55 fields = ['sender_name', 'sender', 'project', 'receivers', 56 'organisation', 'subject', 'body'] 57 58 def __init__(self, user=None, organisation=None, *args, **kwargs): 59 super().__init__(*args, **kwargs) 60 61 self.fields['receivers'].widget = forms.HiddenInput() 62 63 project_qs = Project.objects 64 if organisation: 65 project_qs = Project.objects.filter(organisation=organisation.id) 66 if user and not user.is_superuser: 67 user_groups = user.groups.all() 68 org_groups = organisation.groups.all() 69 shared_groups = user_groups & org_groups 70 group = shared_groups.distinct().first() 71 project_qs = project_qs.filter(group=group) 72 73 self.fields['project'] = forms.ModelChoiceField( 74 label=_('Project'), 75 queryset=project_qs, 76 required=False, empty_label=None) 77 78 self.fields['organisation'] = forms.ModelChoiceField( 79 label=_('Organisation'), 80 queryset=Organisation.objects, 81 required=False, empty_label=None) 82 [end of meinberlin/apps/newsletters/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/newsletters/forms.py b/meinberlin/apps/newsletters/forms.py --- a/meinberlin/apps/newsletters/forms.py +++ b/meinberlin/apps/newsletters/forms.py @@ -74,6 +74,8 @@ label=_('Project'), queryset=project_qs, required=False, empty_label=None) + self.fields['project'].label = _('Receivers are all users ' + 'which follow the following project:') self.fields['organisation'] = forms.ModelChoiceField( label=_('Organisation'),
{"golden_diff": "diff --git a/meinberlin/apps/newsletters/forms.py b/meinberlin/apps/newsletters/forms.py\n--- a/meinberlin/apps/newsletters/forms.py\n+++ b/meinberlin/apps/newsletters/forms.py\n@@ -74,6 +74,8 @@\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n+ self.fields['project'].label = _('Receivers are all users '\n+ 'which follow the following project:')\n \n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n", "issue": "testing #1824 wrong wording\nDashboard/Newsletter: the Word \"Empf\u00e4nger*innen\" is at the wrong place. Please delete it above and replace \"Projekt\" by \"Empf\u00e4nger*innen sind alle, die diesem Projekt folgen:\"\r\n \r\n\n", "before_files": [{"content": "from django import forms\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects.models import Project\n\nfrom . import models\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\n\n\nclass NewsletterForm(forms.ModelForm):\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'receivers', 'project',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n choices = [(value, string)\n for value, string in models.RECEIVER_CHOICES\n if value != models.PLATFORM or (user and user.is_superuser)]\n self.fields['receivers'] = forms.ChoiceField(\n label=_('Receivers'),\n choices=choices,\n widget=forms.RadioSelect(),\n )\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n\n def clean(self):\n cleaned_data = super().clean()\n if cleaned_data.get('receivers') == str(models.PROJECT) and \\\n not cleaned_data.get('project'):\n self.add_error('project', _('Select a Project'))\n\n\nclass RestrictedNewsletterForm(forms.ModelForm):\n\n class Meta:\n model = models.Newsletter\n fields = ['sender_name', 'sender', 'project', 'receivers',\n 'organisation', 'subject', 'body']\n\n def __init__(self, user=None, organisation=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['receivers'].widget = forms.HiddenInput()\n\n project_qs = Project.objects\n if organisation:\n project_qs = Project.objects.filter(organisation=organisation.id)\n if user and not user.is_superuser:\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n group = shared_groups.distinct().first()\n project_qs = project_qs.filter(group=group)\n\n self.fields['project'] = forms.ModelChoiceField(\n label=_('Project'),\n queryset=project_qs,\n required=False, empty_label=None)\n\n self.fields['organisation'] = forms.ModelChoiceField(\n label=_('Organisation'),\n queryset=Organisation.objects,\n required=False, empty_label=None)\n", "path": "meinberlin/apps/newsletters/forms.py"}]}
1,340
125
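The mechanism behind this record's one-line diff — relabelling a model-derived form field after it has been built in `__init__` — looks like this in isolation. This is a plain-Django sketch with invented names, not meinberlin's code, and it keeps `ugettext_lazy` only because the record's codebase predates its removal in Django 4.

```python
from django import forms
from django.utils.translation import ugettext_lazy as _


class NewsletterForm(forms.Form):
    project = forms.CharField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Relabel after construction, as the golden diff above does, so the
        # user-facing wording can be adjusted without changing the field itself.
        self.fields['project'].label = _(
            'Receivers are all users which follow the following project:')
```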
gh_patches_debug_20413
rasdani/github-patches
git_diff
flairNLP__flair-198
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Generate text with language model Is there any script that can generate text from trained language model? Also calculating perplexity on testing data would be helpful. </issue> <code> [start of flair/models/language_model.py] 1 import torch.nn as nn 2 import torch 3 import math 4 from torch.autograd import Variable 5 from typing import Dict, List 6 from flair.data import Dictionary 7 8 9 class LanguageModel(nn.Module): 10 """Container module with an encoder, a recurrent module, and a decoder.""" 11 12 def __init__(self, 13 dictionary: Dictionary, 14 is_forward_lm: bool, 15 hidden_size: int, 16 nlayers: int, 17 embedding_size: int = 100, 18 nout=None, 19 dropout=0.5): 20 21 super(LanguageModel, self).__init__() 22 23 self.dictionary = dictionary 24 self.is_forward_lm: bool = is_forward_lm 25 26 self.dropout = dropout 27 self.hidden_size = hidden_size 28 self.embedding_size = embedding_size 29 self.nlayers = nlayers 30 31 self.drop = nn.Dropout(dropout) 32 self.encoder = nn.Embedding(len(dictionary), embedding_size) 33 34 if nlayers == 1: 35 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers) 36 else: 37 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout) 38 39 self.hidden = None 40 41 self.nout = nout 42 if nout is not None: 43 self.proj = nn.Linear(hidden_size, nout) 44 self.initialize(self.proj.weight) 45 self.decoder = nn.Linear(nout, len(dictionary)) 46 else: 47 self.proj = None 48 self.decoder = nn.Linear(hidden_size, len(dictionary)) 49 50 self.init_weights() 51 52 # auto-spawn on GPU if available 53 if torch.cuda.is_available(): 54 self.cuda() 55 56 def init_weights(self): 57 initrange = 0.1 58 self.encoder.weight.data.uniform_(-initrange, initrange) 59 self.decoder.bias.data.fill_(0) 60 self.decoder.weight.data.uniform_(-initrange, initrange) 61 62 def set_hidden(self, hidden): 63 self.hidden = hidden 64 65 def forward(self, input, hidden, ordered_sequence_lengths=None): 66 encoded = self.encoder(input) 67 emb = self.drop(encoded) 68 69 self.rnn.flatten_parameters() 70 71 output, hidden = self.rnn(emb, hidden) 72 73 if self.proj is not None: 74 output = self.proj(output) 75 76 output = self.drop(output) 77 78 decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2))) 79 80 return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden 81 82 def init_hidden(self, bsz): 83 weight = next(self.parameters()).data 84 return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()), 85 Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_())) 86 87 def get_representation(self, strings: List[str], detach_from_lm=True): 88 89 sequences_as_char_indices: List[List[int]] = [] 90 for string in strings: 91 char_indices = [self.dictionary.get_idx_for_item(char) for char in string] 92 sequences_as_char_indices.append(char_indices) 93 94 batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1)) 95 96 if torch.cuda.is_available(): 97 batch = batch.cuda() 98 99 hidden = self.init_hidden(len(strings)) 100 prediction, rnn_output, hidden = self.forward(batch, hidden) 101 102 if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output) 103 104 return rnn_output 105 106 def repackage_hidden(self, h): 107 """Wraps hidden states in new Variables, to detach them from their history.""" 108 if type(h) == torch.Tensor: 109 return Variable(h.data) 110 else: 111 return 
tuple(self.repackage_hidden(v) for v in h) 112 113 def initialize(self, matrix): 114 in_, out_ = matrix.size() 115 stdv = math.sqrt(3. / (in_ + out_)) 116 matrix.data.uniform_(-stdv, stdv) 117 118 @classmethod 119 def load_language_model(cls, model_file): 120 121 if not torch.cuda.is_available(): 122 state = torch.load(model_file, map_location='cpu') 123 else: 124 state = torch.load(model_file) 125 126 model = LanguageModel(state['dictionary'], 127 state['is_forward_lm'], 128 state['hidden_size'], 129 state['nlayers'], 130 state['embedding_size'], 131 state['nout'], 132 state['dropout']) 133 model.load_state_dict(state['state_dict']) 134 model.eval() 135 if torch.cuda.is_available(): 136 model.cuda() 137 return model 138 139 def save(self, file): 140 model_state = { 141 'state_dict': self.state_dict(), 142 'dictionary': self.dictionary, 143 'is_forward_lm': self.is_forward_lm, 144 'hidden_size': self.hidden_size, 145 'nlayers': self.nlayers, 146 'embedding_size': self.embedding_size, 147 'nout': self.nout, 148 'dropout': self.dropout 149 } 150 torch.save(model_state, file, pickle_protocol=4) 151 [end of flair/models/language_model.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flair/models/language_model.py b/flair/models/language_model.py --- a/flair/models/language_model.py +++ b/flair/models/language_model.py @@ -2,7 +2,7 @@ import torch import math from torch.autograd import Variable -from typing import Dict, List +from typing import List from flair.data import Dictionary @@ -148,3 +148,24 @@ 'dropout': self.dropout } torch.save(model_state, file, pickle_protocol=4) + + def generate_text(self, number_of_characters=1000) -> str: + characters = [] + + idx2item = self.dictionary.idx2item + + # initial hidden state + hidden = self.init_hidden(1) + input = torch.rand(1, 1).mul(len(idx2item)).long() + if torch.cuda.is_available(): + input = input.cuda() + + for i in range(number_of_characters): + prediction, rnn_output, hidden = self.forward(input, hidden) + word_weights = prediction.squeeze().data.div(1.0).exp().cpu() + word_idx = torch.multinomial(word_weights, 1)[0] + input.data.fill_(word_idx) + word = idx2item[word_idx].decode('UTF-8') + characters.append(word) + + return ''.join(characters)
{"golden_diff": "diff --git a/flair/models/language_model.py b/flair/models/language_model.py\n--- a/flair/models/language_model.py\n+++ b/flair/models/language_model.py\n@@ -2,7 +2,7 @@\n import torch\n import math\n from torch.autograd import Variable\n-from typing import Dict, List\n+from typing import List\n from flair.data import Dictionary\n \n \n@@ -148,3 +148,24 @@\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n+\n+ def generate_text(self, number_of_characters=1000) -> str:\n+ characters = []\n+\n+ idx2item = self.dictionary.idx2item\n+\n+ # initial hidden state\n+ hidden = self.init_hidden(1)\n+ input = torch.rand(1, 1).mul(len(idx2item)).long()\n+ if torch.cuda.is_available():\n+ input = input.cuda()\n+\n+ for i in range(number_of_characters):\n+ prediction, rnn_output, hidden = self.forward(input, hidden)\n+ word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n+ word_idx = torch.multinomial(word_weights, 1)[0]\n+ input.data.fill_(word_idx)\n+ word = idx2item[word_idx].decode('UTF-8')\n+ characters.append(word)\n+\n+ return ''.join(characters)\n", "issue": "Generate text with language model\nIs there any script that can generate text from trained language model?\r\nAlso calculating perplexity on testing data would be helpful.\n", "before_files": [{"content": "import torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom typing import Dict, List\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.5):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),\n 
Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))\n\n def get_representation(self, strings: List[str], detach_from_lm=True):\n\n sequences_as_char_indices: List[List[int]] = []\n for string in strings:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))\n\n if torch.cuda.is_available():\n batch = batch.cuda()\n\n hidden = self.init_hidden(len(strings))\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n\n if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)\n\n return rnn_output\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return Variable(h.data)\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. / (in_ + out_))\n matrix.data.uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file):\n\n if not torch.cuda.is_available():\n state = torch.load(model_file, map_location='cpu')\n else:\n state = torch.load(model_file)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n if torch.cuda.is_available():\n model.cuda()\n return model\n\n def save(self, file):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n", "path": "flair/models/language_model.py"}]}
1,977
310
gh_patches_debug_35899
rasdani/github-patches
git_diff
bids-standard__pybids-598
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Keeping the docs current We're currently manually pushing the docs to https://bids-standard.github.io/pybids/, which is easy to forget, and the docs are currently out-of-date (0.9). It would be good to make an explicit process to update them or set up something automatic like ReadTheDocs. ReadTheDocs also has the advantage of keeping old versions around, which might be useful for reference as the API changes. </issue> <code> [start of doc/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 # 13 import os 14 import sys 15 import sphinx_rtd_theme 16 17 sys.path.insert(0, os.path.abspath('../')) 18 19 # -- Project information ----------------------------------------------------- 20 21 project = 'PyBIDS' 22 copyright = '2020, Developers of PyBIDS' 23 author = 'Developers of PyBIDS' 24 25 currentdir = os.path.abspath(os.path.dirname(__file__)) 26 from bids._version import get_versions 27 __version__ = get_versions()['version'] 28 source_version = __version__ 29 currentdir = os.path.abspath(os.path.dirname(__file__)) 30 31 # -- General configuration --------------------------------------------------- 32 33 # Add any Sphinx extension module names here, as strings. They can be 34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 35 # ones. 36 extensions = [ 37 'sphinx.ext.autodoc', 38 'sphinx.ext.autosummary', 39 'sphinx.ext.autosectionlabel', 40 'sphinx.ext.intersphinx', 41 'sphinx.ext.coverage', 42 'sphinx.ext.mathjax', 43 'sphinx.ext.viewcode', 44 'sphinx.ext.githubpages', 45 'sphinx.ext.napoleon', 46 'numpydoc', 47 'm2r' 48 ] 49 50 intersphinx_mapping = { 51 'http://docs.python.org/3.5': None, 52 'http://docs.scipy.org/doc/numpy': None, 53 'http://docs.scipy.org/doc/scipy/reference': None, 54 'http://matplotlib.org/': None, 55 'http://scikit-learn.org/0.17': None, 56 'http://nipy.org/nibabel/': None, 57 'http://pandas.pydata.org/pandas-docs/stable/': None, 58 'http://neurosynth.readthedocs.io/en/latest/': None, 59 } 60 61 # If your documentation needs a minimal Sphinx version, state it here. 62 needs_sphinx = '2.2.0' 63 64 # Generate stubs 65 autosummary_generate = True 66 autodoc_default_flags = ['members', 'inherited-members'] 67 add_module_names = False 68 69 # Add any paths that contain templates here, relative to this directory. 70 templates_path = ['_templates'] 71 72 # The suffix of source filenames. 73 source_suffix = ['.rst', '.md'] 74 75 # The master toctree document. 76 master_doc = 'index' 77 78 # List of patterns, relative to source directory, that match files and 79 # directories to ignore when looking for source files. 80 # This pattern also affects html_static_path and html_extra_path. 81 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 82 83 # The short X.Y version. 84 version = '' 85 # The full version, including alpha/beta/rc tags. 
86 import bids 87 release = bids.__version__ 88 89 # The name of the Pygments (syntax highlighting) style to use. 90 pygments_style = 'sphinx' 91 92 # -- Options for HTML output ------------------------------------------------- 93 94 # The theme to use for HTML and HTML Help pages. See the documentation for 95 # a list of builtin themes. 96 # 97 # The theme to use for HTML and HTML Help pages. See the documentation for 98 # a list of builtin themes. 99 html_theme = "sphinx_rtd_theme" 100 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 101 102 # Add any paths that contain custom static files (such as style sheets) here, 103 # relative to this directory. They are copied after the builtin static files, 104 # so a file named "default.css" will overwrite the builtin "default.css". 105 html_static_path = ['_static'] 106 107 # If false, no module index is generated. 108 html_domain_indices = False 109 110 # One entry per manual page. List of tuples 111 # (source start file, name, description, authors, manual section). 112 man_pages = [ 113 ('index', 'bids', 'bids Documentation', 114 [author], 1) 115 ] 116 117 # Grouping the document tree into Texinfo files. List of tuples 118 # (source start file, target name, title, author, 119 # dir menu entry, description, category) 120 texinfo_documents = [ 121 ('index', 'bids', 'bids Documentation', 122 author, 'bids', 'One line description of project.', 123 'Miscellaneous'), 124 ] 125 126 # If false, no module index is generated. 127 texinfo_domain_indices = False 128 [end of doc/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/conf.py b/doc/conf.py --- a/doc/conf.py +++ b/doc/conf.py @@ -12,20 +12,21 @@ # import os import sys +from datetime import date + import sphinx_rtd_theme +import bids + sys.path.insert(0, os.path.abspath('../')) # -- Project information ----------------------------------------------------- project = 'PyBIDS' -copyright = '2020, Developers of PyBIDS' +copyright = '2015-{}, Developers of PyBIDS'.format(date.today().year) author = 'Developers of PyBIDS' currentdir = os.path.abspath(os.path.dirname(__file__)) -from bids._version import get_versions -__version__ = get_versions()['version'] -source_version = __version__ currentdir = os.path.abspath(os.path.dirname(__file__)) # -- General configuration --------------------------------------------------- @@ -44,20 +45,21 @@ 'sphinx.ext.githubpages', 'sphinx.ext.napoleon', 'numpydoc', - 'm2r' ] intersphinx_mapping = { - 'http://docs.python.org/3.5': None, - 'http://docs.scipy.org/doc/numpy': None, - 'http://docs.scipy.org/doc/scipy/reference': None, - 'http://matplotlib.org/': None, - 'http://scikit-learn.org/0.17': None, - 'http://nipy.org/nibabel/': None, - 'http://pandas.pydata.org/pandas-docs/stable/': None, - 'http://neurosynth.readthedocs.io/en/latest/': None, + 'https://docs.python.org/3.5': None, + 'https://docs.scipy.org/doc/numpy': None, + 'https://docs.scipy.org/doc/scipy/reference': None, + 'https://matplotlib.org/': None, + 'https://scikit-learn.org/0.17': None, + 'https://nipy.org/nibabel/': None, + 'https://pandas.pydata.org/pandas-docs/stable/': None, + 'https://neurosynth.readthedocs.io/en/latest/': None, } +intersphinx_timeout = 5 + # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '2.2.0' @@ -81,10 +83,9 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The short X.Y version. -version = '' +version = bids.__version__ # The full version, including alpha/beta/rc tags. -import bids -release = bids.__version__ +release = version # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx'
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -12,20 +12,21 @@\n #\n import os\n import sys\n+from datetime import date\n+\n import sphinx_rtd_theme\n \n+import bids\n+\n sys.path.insert(0, os.path.abspath('../'))\n \n # -- Project information -----------------------------------------------------\n \n project = 'PyBIDS'\n-copyright = '2020, Developers of PyBIDS'\n+copyright = '2015-{}, Developers of PyBIDS'.format(date.today().year)\n author = 'Developers of PyBIDS'\n \n currentdir = os.path.abspath(os.path.dirname(__file__))\n-from bids._version import get_versions\n-__version__ = get_versions()['version']\n-source_version = __version__\n currentdir = os.path.abspath(os.path.dirname(__file__))\n \n # -- General configuration ---------------------------------------------------\n@@ -44,20 +45,21 @@\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'numpydoc',\n- 'm2r'\n ]\n \n intersphinx_mapping = {\n- 'http://docs.python.org/3.5': None,\n- 'http://docs.scipy.org/doc/numpy': None,\n- 'http://docs.scipy.org/doc/scipy/reference': None,\n- 'http://matplotlib.org/': None,\n- 'http://scikit-learn.org/0.17': None,\n- 'http://nipy.org/nibabel/': None,\n- 'http://pandas.pydata.org/pandas-docs/stable/': None,\n- 'http://neurosynth.readthedocs.io/en/latest/': None,\n+ 'https://docs.python.org/3.5': None,\n+ 'https://docs.scipy.org/doc/numpy': None,\n+ 'https://docs.scipy.org/doc/scipy/reference': None,\n+ 'https://matplotlib.org/': None,\n+ 'https://scikit-learn.org/0.17': None,\n+ 'https://nipy.org/nibabel/': None,\n+ 'https://pandas.pydata.org/pandas-docs/stable/': None,\n+ 'https://neurosynth.readthedocs.io/en/latest/': None,\n }\n \n+intersphinx_timeout = 5\n+\n # If your documentation needs a minimal Sphinx version, state it here.\n needs_sphinx = '2.2.0'\n \n@@ -81,10 +83,9 @@\n exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n \n # The short X.Y version.\n-version = ''\n+version = bids.__version__\n # The full version, including alpha/beta/rc tags.\n-import bids\n-release = bids.__version__\n+release = version\n \n # The name of the Pygments (syntax highlighting) style to use.\n pygments_style = 'sphinx'\n", "issue": "Keeping the docs current\nWe're currently manually pushing the docs to https://bids-standard.github.io/pybids/, which is easy to forget, and the docs are currently out-of-date (0.9). It would be good to make an explicit process to update them or set up something automatic like ReadTheDocs.\r\n\r\nReadTheDocs also has the advantage of keeping old versions around, which might be useful for reference as the API changes.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport sphinx_rtd_theme\n\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'PyBIDS'\ncopyright = '2020, Developers of PyBIDS'\nauthor = 'Developers of PyBIDS'\n\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\nfrom bids._version import get_versions\n__version__ = get_versions()['version']\nsource_version = __version__\ncurrentdir = os.path.abspath(os.path.dirname(__file__))\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.napoleon',\n 'numpydoc',\n 'm2r'\n]\n\nintersphinx_mapping = {\n 'http://docs.python.org/3.5': None,\n 'http://docs.scipy.org/doc/numpy': None,\n 'http://docs.scipy.org/doc/scipy/reference': None,\n 'http://matplotlib.org/': None,\n 'http://scikit-learn.org/0.17': None,\n 'http://nipy.org/nibabel/': None,\n 'http://pandas.pydata.org/pandas-docs/stable/': None,\n 'http://neurosynth.readthedocs.io/en/latest/': None,\n}\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '2.2.0'\n\n# Generate stubs\nautosummary_generate = True\nautodoc_default_flags = ['members', 'inherited-members']\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The short X.Y version.\nversion = ''\n# The full version, including alpha/beta/rc tags.\nimport bids\nrelease = bids.__version__\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bids', 'bids Documentation',\n [author], 1)\n]\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'bids', 'bids Documentation',\n author, 'bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# If false, no module index is generated.\ntexinfo_domain_indices = False\n", "path": "doc/conf.py"}]}
1,916
635
gh_patches_debug_25514
rasdani/github-patches
git_diff
DataDog__dd-trace-py-1049
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> httplib patch doesn't seem to support a default pin I'm trying to set up `httplib` patching in my application and I'm struggling to get it to send spans with a `service` set. I am not certain, but I think that the documentation and code for the `httplib` patch in `ddtrace.contrib.httplib` is broken, and doesn't support setting up a default service to send spans as. Here's my reproduction case: ```python import logging logging.basicConfig(level=logging.DEBUG) from ddtrace import patch patch(httplib=True) # Code taken from the ddtrace-py docs for httplib on python 3 from ddtrace import Pin import http.client import urllib.request # Use a Pin to specify metadata for all http requests Pin.override(http.client, service='httplib') resp = urllib.request.urlopen('https://www.datadoghq.com/') ``` If I run that locally on the latest master, I get messages like these from the `dd-trace-agent` running on my machine: ``` 2018-11-16 14:51:47 ERROR (api.go:249) - dropping trace reason: invalid span (SpanID:11098683504101424356): span.normalize: empty `Service` (debug for more info), [name:"http.client.request" resource:"http.client.request" traceID:2003532794568525569 spanID:11098683504101424356 start:1542397906651150080 duration:274516105 meta:<key:"http.method" value:"GET" > meta:<key:"http.status_code" value:"200" > meta:<key:"http.url" value:"https://www.datadoghq.com/" > meta:<key:"system.pid" value:"29250" > type:"http" ] ``` I did some debugging locally, and it looks like the default Pin added to the `HTTPSConnection` instance in the wrapped constructor (https://github.com/DataDog/dd-trace-py/blob/v0.16.0/ddtrace/contrib/httplib/patch.py#L19) is the one that ends up feeding the span's their service value. The pin added in the example code (as suggested by the docs) isn't having any effect. Unless there's something I'm missing, this seems like it makes sense, because the pin the docs suggest adding is on the `http.client` module, not on the classes inside nor on instances of those classes. How was that supposed to work @palazzem? </issue> <code> [start of ddtrace/contrib/httplib/__init__.py] 1 """ 2 Patch the built-in httplib/http.client libraries to trace all HTTP calls. 3 4 5 Usage:: 6 7 # Patch all supported modules/functions 8 from ddtrace import patch 9 patch(httplib=True) 10 11 # Python 2 12 from ddtrace import Pin 13 import httplib 14 import urllib 15 16 # Use a Pin to specify metadata for all http requests 17 Pin.override(httplib, service='httplib') 18 resp = urllib.urlopen('http://www.datadog.com/') 19 20 # Python 3 21 from ddtrace import Pin 22 import http.client 23 import urllib.request 24 25 # Use a Pin to specify metadata for all http requests 26 Pin.override(http.client, service='httplib') 27 resp = urllib.request.urlopen('http://www.datadog.com/') 28 29 :ref:`Headers tracing <http-headers-tracing>` is supported for this integration. 30 """ 31 from .patch import patch, unpatch 32 __all__ = ['patch', 'unpatch'] 33 [end of ddtrace/contrib/httplib/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/contrib/httplib/__init__.py b/ddtrace/contrib/httplib/__init__.py --- a/ddtrace/contrib/httplib/__init__.py +++ b/ddtrace/contrib/httplib/__init__.py @@ -1,5 +1,5 @@ """ -Patch the built-in httplib/http.client libraries to trace all HTTP calls. +Patch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls. Usage:: @@ -9,23 +9,24 @@ patch(httplib=True) # Python 2 - from ddtrace import Pin import httplib import urllib - # Use a Pin to specify metadata for all http requests - Pin.override(httplib, service='httplib') resp = urllib.urlopen('http://www.datadog.com/') # Python 3 - from ddtrace import Pin import http.client import urllib.request - # Use a Pin to specify metadata for all http requests - Pin.override(http.client, service='httplib') resp = urllib.request.urlopen('http://www.datadog.com/') +``httplib`` spans do not include a default service name. Before HTTP calls are +made, ensure a parent span has been started with a service name to be used for +spans generated from those calls:: + + with tracer.trace('main', service='my-httplib-operation'): + resp = urllib.request.urlopen('http://www.datadog.com/') + :ref:`Headers tracing <http-headers-tracing>` is supported for this integration. """ from .patch import patch, unpatch
{"golden_diff": "diff --git a/ddtrace/contrib/httplib/__init__.py b/ddtrace/contrib/httplib/__init__.py\n--- a/ddtrace/contrib/httplib/__init__.py\n+++ b/ddtrace/contrib/httplib/__init__.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-Patch the built-in httplib/http.client libraries to trace all HTTP calls.\n+Patch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls.\n \n \n Usage::\n@@ -9,23 +9,24 @@\n patch(httplib=True)\n \n # Python 2\n- from ddtrace import Pin\n import httplib\n import urllib\n \n- # Use a Pin to specify metadata for all http requests\n- Pin.override(httplib, service='httplib')\n resp = urllib.urlopen('http://www.datadog.com/')\n \n # Python 3\n- from ddtrace import Pin\n import http.client\n import urllib.request\n \n- # Use a Pin to specify metadata for all http requests\n- Pin.override(http.client, service='httplib')\n resp = urllib.request.urlopen('http://www.datadog.com/')\n \n+``httplib`` spans do not include a default service name. Before HTTP calls are\n+made, ensure a parent span has been started with a service name to be used for\n+spans generated from those calls::\n+\n+ with tracer.trace('main', service='my-httplib-operation'):\n+ resp = urllib.request.urlopen('http://www.datadog.com/')\n+\n :ref:`Headers tracing <http-headers-tracing>` is supported for this integration.\n \"\"\"\n from .patch import patch, unpatch\n", "issue": "httplib patch doesn't seem to support a default pin\nI'm trying to set up `httplib` patching in my application and I'm struggling to get it to send spans with a `service` set. I am not certain, but I think that the documentation and code for the `httplib` patch in `ddtrace.contrib.httplib` is broken, and doesn't support setting up a default service to send spans as.\r\n\r\nHere's my reproduction case:\r\n\r\n```python\r\nimport logging\r\nlogging.basicConfig(level=logging.DEBUG)\r\n\r\nfrom ddtrace import patch\r\npatch(httplib=True)\r\n\r\n# Code taken from the ddtrace-py docs for httplib on python 3\r\nfrom ddtrace import Pin\r\nimport http.client\r\nimport urllib.request\r\n\r\n# Use a Pin to specify metadata for all http requests\r\nPin.override(http.client, service='httplib')\r\nresp = urllib.request.urlopen('https://www.datadoghq.com/')\r\n```\r\n\r\nIf I run that locally on the latest master, I get messages like these from the `dd-trace-agent` running on my machine:\r\n\r\n```\r\n2018-11-16 14:51:47 ERROR (api.go:249) - dropping trace reason: invalid span (SpanID:11098683504101424356): span.normalize: empty `Service` (debug for more info), [name:\"http.client.request\" resource:\"http.client.request\" traceID:2003532794568525569 spanID:11098683504101424356 start:1542397906651150080 duration:274516105 meta:<key:\"http.method\" value:\"GET\" > meta:<key:\"http.status_code\" value:\"200\" > meta:<key:\"http.url\" value:\"https://www.datadoghq.com/\" > meta:<key:\"system.pid\" value:\"29250\" > type:\"http\" ]\r\n```\r\n\r\nI did some debugging locally, and it looks like the default Pin added to the `HTTPSConnection` instance in the wrapped constructor (https://github.com/DataDog/dd-trace-py/blob/v0.16.0/ddtrace/contrib/httplib/patch.py#L19) is the one that ends up feeding the span's their service value. The pin added in the example code (as suggested by the docs) isn't having any effect. Unless there's something I'm missing, this seems like it makes sense, because the pin the docs suggest adding is on the `http.client` module, not on the classes inside nor on instances of those classes. 
How was that supposed to work @palazzem? \n", "before_files": [{"content": "\"\"\"\nPatch the built-in httplib/http.client libraries to trace all HTTP calls.\n\n\nUsage::\n\n # Patch all supported modules/functions\n from ddtrace import patch\n patch(httplib=True)\n\n # Python 2\n from ddtrace import Pin\n import httplib\n import urllib\n\n # Use a Pin to specify metadata for all http requests\n Pin.override(httplib, service='httplib')\n resp = urllib.urlopen('http://www.datadog.com/')\n\n # Python 3\n from ddtrace import Pin\n import http.client\n import urllib.request\n\n # Use a Pin to specify metadata for all http requests\n Pin.override(http.client, service='httplib')\n resp = urllib.request.urlopen('http://www.datadog.com/')\n\n:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.\n\"\"\"\nfrom .patch import patch, unpatch\n__all__ = ['patch', 'unpatch']\n", "path": "ddtrace/contrib/httplib/__init__.py"}]}
1,414
365
gh_patches_debug_4366
rasdani/github-patches
git_diff
ephios-dev__ephios-1078
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Notification Mails detected as SPAM **Describe the bug** This is likely specific to the exact email setup users have but in my case, the notification mails in my case are caught by the SpamAssassin rule `TO_NO_BRKTS_HTML_IMG` which is discussed [here](https://stackoverflow.com/questions/40082594/meaning-of-to-no-brkts-html-img-in-spamassassin). As a fix for this, it would be useful if ephios used the configured first and last name to properly address the users in the `To:` field of the email. **To Reproduce** Make ephios send a notification email. **Expected behavior** Emails should arrive in my inbox without being detected as SPAM. This could be done by modifying the `To:` field. **Screenshots** \- **Environment** An email SPAM filter using SpamAssassin **Additional context** \- </issue> <code> [start of ephios/core/services/notifications/backends.py] 1 import logging 2 import smtplib 3 import traceback 4 5 from django.conf import settings 6 from django.core.mail import mail_admins 7 from django.utils.translation import gettext_lazy as _ 8 from webpush import send_user_notification 9 10 from ephios.core.models.users import Notification 11 from ephios.core.services.mail.send import send_mail 12 13 logger = logging.getLogger(__name__) 14 15 16 def installed_notification_backends(): 17 from ephios.core.signals import register_notification_backends 18 19 for _, backends in register_notification_backends.send_to_all_plugins(None): 20 yield from (b() for b in backends) 21 22 23 def enabled_notification_backends(): 24 from ephios.core.signals import register_notification_backends 25 26 for _, backends in register_notification_backends.send(None): 27 yield from (b() for b in backends) 28 29 30 def send_all_notifications(): 31 for backend in installed_notification_backends(): 32 for notification in Notification.objects.filter(failed=False): 33 if backend.can_send(notification) and backend.user_prefers_sending(notification): 34 try: 35 backend.send(notification) 36 except Exception as e: # pylint: disable=broad-except 37 if settings.DEBUG: 38 raise e 39 notification.failed = True 40 notification.save() 41 try: 42 mail_admins( 43 "Notification sending failed", 44 f"Notification: {notification}\nException: {e}\n{traceback.format_exc()}", 45 ) 46 except smtplib.SMTPConnectError: 47 pass # if the mail backend threw this, mail admin will probably throw this as well 48 logger.warning( 49 f"Notification sending failed for notification object #{notification.pk} ({notification}) for backend {backend} with {e}" 50 ) 51 Notification.objects.filter(failed=False).delete() 52 53 54 class AbstractNotificationBackend: 55 @property 56 def slug(self): 57 return NotImplementedError 58 59 @property 60 def title(self): 61 return NotImplementedError 62 63 @classmethod 64 def can_send(cls, notification): 65 return notification.user is not None 66 67 @classmethod 68 def user_prefers_sending(cls, notification): 69 if notification.notification_type.unsubscribe_allowed and notification.user is not None: 70 if not notification.user.is_active: 71 return False 72 backends = notification.user.preferences["notifications__notifications"].get( 73 notification.slug 74 ) 75 if backends is not None: 76 return cls.slug in backends 77 return True 78 79 @classmethod 80 def send(cls, notification: Notification): 81 raise NotImplementedError 82 83 84 class EmailNotificationBackend(AbstractNotificationBackend): 85 slug = 
"ephios_backend_email" 86 title = _("via email") 87 88 @classmethod 89 def can_send(cls, notification): 90 return notification.user is not None or "email" in notification.data 91 92 @classmethod 93 def _get_mailaddress(cls, notification): 94 return notification.user.email if notification.user else notification.data.get("email") 95 96 @classmethod 97 def send(cls, notification): 98 send_mail( 99 to=[cls._get_mailaddress(notification)], 100 subject=notification.subject, 101 plaintext=notification.as_plaintext(), 102 html=notification.as_html(), 103 is_autogenerated=True, 104 ) 105 106 107 class WebPushNotificationBackend(AbstractNotificationBackend): 108 slug = "ephios_backend_webpush" 109 title = _("via push notification") 110 111 @classmethod 112 def send(cls, notification): 113 payload = { 114 "head": str(notification.subject), 115 "body": notification.body, 116 "icon": "/static/ephios/img/ephios-symbol-red.svg", 117 } 118 if actions := notification.get_actions(): 119 payload["url"] = actions[0][1] 120 send_user_notification(user=notification.user, payload=payload, ttl=1000) 121 122 123 CORE_NOTIFICATION_BACKENDS = [EmailNotificationBackend, WebPushNotificationBackend] 124 [end of ephios/core/services/notifications/backends.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ephios/core/services/notifications/backends.py b/ephios/core/services/notifications/backends.py --- a/ephios/core/services/notifications/backends.py +++ b/ephios/core/services/notifications/backends.py @@ -91,7 +91,9 @@ @classmethod def _get_mailaddress(cls, notification): - return notification.user.email if notification.user else notification.data.get("email") + if notification.user: + return f"{notification.user.get_full_name()} <{notification.user.email}>" + return notification.data.get("email") @classmethod def send(cls, notification):
{"golden_diff": "diff --git a/ephios/core/services/notifications/backends.py b/ephios/core/services/notifications/backends.py\n--- a/ephios/core/services/notifications/backends.py\n+++ b/ephios/core/services/notifications/backends.py\n@@ -91,7 +91,9 @@\n \n @classmethod\n def _get_mailaddress(cls, notification):\n- return notification.user.email if notification.user else notification.data.get(\"email\")\n+ if notification.user:\n+ return f\"{notification.user.get_full_name()} <{notification.user.email}>\"\n+ return notification.data.get(\"email\")\n \n @classmethod\n def send(cls, notification):\n", "issue": "Notification Mails detected as SPAM\n**Describe the bug**\r\nThis is likely specific to the exact email setup users have but in my case, the notification mails in my case are caught by the SpamAssassin rule `TO_NO_BRKTS_HTML_IMG` which is discussed [here](https://stackoverflow.com/questions/40082594/meaning-of-to-no-brkts-html-img-in-spamassassin). As a fix for this, it would be useful if ephios used the configured first and last name to properly address the users in the `To:` field of the email. \r\n\r\n**To Reproduce**\r\nMake ephios send a notification email.\r\n\r\n**Expected behavior**\r\nEmails should arrive in my inbox without being detected as SPAM. This could be done by modifying the `To:` field. \r\n\r\n**Screenshots**\r\n\\-\r\n\r\n**Environment**\r\nAn email SPAM filter using SpamAssassin\r\n\r\n**Additional context**\r\n\\-\r\n\n", "before_files": [{"content": "import logging\nimport smtplib\nimport traceback\n\nfrom django.conf import settings\nfrom django.core.mail import mail_admins\nfrom django.utils.translation import gettext_lazy as _\nfrom webpush import send_user_notification\n\nfrom ephios.core.models.users import Notification\nfrom ephios.core.services.mail.send import send_mail\n\nlogger = logging.getLogger(__name__)\n\n\ndef installed_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send_to_all_plugins(None):\n yield from (b() for b in backends)\n\n\ndef enabled_notification_backends():\n from ephios.core.signals import register_notification_backends\n\n for _, backends in register_notification_backends.send(None):\n yield from (b() for b in backends)\n\n\ndef send_all_notifications():\n for backend in installed_notification_backends():\n for notification in Notification.objects.filter(failed=False):\n if backend.can_send(notification) and backend.user_prefers_sending(notification):\n try:\n backend.send(notification)\n except Exception as e: # pylint: disable=broad-except\n if settings.DEBUG:\n raise e\n notification.failed = True\n notification.save()\n try:\n mail_admins(\n \"Notification sending failed\",\n f\"Notification: {notification}\\nException: {e}\\n{traceback.format_exc()}\",\n )\n except smtplib.SMTPConnectError:\n pass # if the mail backend threw this, mail admin will probably throw this as well\n logger.warning(\n f\"Notification sending failed for notification object #{notification.pk} ({notification}) for backend {backend} with {e}\"\n )\n Notification.objects.filter(failed=False).delete()\n\n\nclass AbstractNotificationBackend:\n @property\n def slug(self):\n return NotImplementedError\n\n @property\n def title(self):\n return NotImplementedError\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None\n\n @classmethod\n def user_prefers_sending(cls, notification):\n if 
notification.notification_type.unsubscribe_allowed and notification.user is not None:\n if not notification.user.is_active:\n return False\n backends = notification.user.preferences[\"notifications__notifications\"].get(\n notification.slug\n )\n if backends is not None:\n return cls.slug in backends\n return True\n\n @classmethod\n def send(cls, notification: Notification):\n raise NotImplementedError\n\n\nclass EmailNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_email\"\n title = _(\"via email\")\n\n @classmethod\n def can_send(cls, notification):\n return notification.user is not None or \"email\" in notification.data\n\n @classmethod\n def _get_mailaddress(cls, notification):\n return notification.user.email if notification.user else notification.data.get(\"email\")\n\n @classmethod\n def send(cls, notification):\n send_mail(\n to=[cls._get_mailaddress(notification)],\n subject=notification.subject,\n plaintext=notification.as_plaintext(),\n html=notification.as_html(),\n is_autogenerated=True,\n )\n\n\nclass WebPushNotificationBackend(AbstractNotificationBackend):\n slug = \"ephios_backend_webpush\"\n title = _(\"via push notification\")\n\n @classmethod\n def send(cls, notification):\n payload = {\n \"head\": str(notification.subject),\n \"body\": notification.body,\n \"icon\": \"/static/ephios/img/ephios-symbol-red.svg\",\n }\n if actions := notification.get_actions():\n payload[\"url\"] = actions[0][1]\n send_user_notification(user=notification.user, payload=payload, ttl=1000)\n\n\nCORE_NOTIFICATION_BACKENDS = [EmailNotificationBackend, WebPushNotificationBackend]\n", "path": "ephios/core/services/notifications/backends.py"}]}
1,803
141
gh_patches_debug_9171
rasdani/github-patches
git_diff
nilearn__nilearn-4267
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Adding tests for rapid inspection of visual elements **What would you like changed/added and why?** Quoting @jeromedockes here, from https://github.com/nilearn/nilearn/pull/2191#issuecomment-546278888 Since we are adding reports etc. we should add more meaningful inspection of generated plots and reports in the tests **What would be the benefit? Does the change make something easier to use?** I think CircleCI does all this but it takes way too long. Maybe a faster simpler test to quickly generate plots and reports to visually examine will be useful? </issue> <code> [start of nilearn/reporting/__init__.py] 1 """Reporting code for nilearn. 2 3 This module implements plotting functions useful to report analysis results. 4 5 Author: Martin Perez-Guevara, Elvis Dohmatob, 2017 6 """ 7 8 from nilearn.reporting.get_clusters_table import get_clusters_table 9 from nilearn.reporting.glm_reporter import make_glm_report 10 from nilearn.reporting.html_report import HTMLReport 11 12 __all__ = [ 13 "HTMLReport", 14 "get_clusters_table", 15 "make_glm_report", 16 ] 17 [end of nilearn/reporting/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nilearn/reporting/__init__.py b/nilearn/reporting/__init__.py --- a/nilearn/reporting/__init__.py +++ b/nilearn/reporting/__init__.py @@ -1,10 +1,10 @@ """Reporting code for nilearn. This module implements plotting functions useful to report analysis results. - -Author: Martin Perez-Guevara, Elvis Dohmatob, 2017 """ +# Author: Martin Perez-Guevara, Elvis Dohmatob, 2017 + from nilearn.reporting.get_clusters_table import get_clusters_table from nilearn.reporting.glm_reporter import make_glm_report from nilearn.reporting.html_report import HTMLReport
{"golden_diff": "diff --git a/nilearn/reporting/__init__.py b/nilearn/reporting/__init__.py\n--- a/nilearn/reporting/__init__.py\n+++ b/nilearn/reporting/__init__.py\n@@ -1,10 +1,10 @@\n \"\"\"Reporting code for nilearn.\n \n This module implements plotting functions useful to report analysis results.\n-\n-Author: Martin Perez-Guevara, Elvis Dohmatob, 2017\n \"\"\"\n \n+# Author: Martin Perez-Guevara, Elvis Dohmatob, 2017\n+\n from nilearn.reporting.get_clusters_table import get_clusters_table\n from nilearn.reporting.glm_reporter import make_glm_report\n from nilearn.reporting.html_report import HTMLReport\n", "issue": "Adding tests for rapid inspection of visual elements\n**What would you like changed/added and why?**\r\n\r\nQuoting @jeromedockes here, from https://github.com/nilearn/nilearn/pull/2191#issuecomment-546278888\r\nSince we are adding reports etc. we should add more meaningful inspection of generated plots and reports in the tests\r\n\r\n**What would be the benefit? Does the change make something easier to use?**\r\n\r\n I think CircleCI does all this but it takes way too long. Maybe a faster simpler test to quickly generate plots and reports to visually examine will be useful?\n", "before_files": [{"content": "\"\"\"Reporting code for nilearn.\n\nThis module implements plotting functions useful to report analysis results.\n\nAuthor: Martin Perez-Guevara, Elvis Dohmatob, 2017\n\"\"\"\n\nfrom nilearn.reporting.get_clusters_table import get_clusters_table\nfrom nilearn.reporting.glm_reporter import make_glm_report\nfrom nilearn.reporting.html_report import HTMLReport\n\n__all__ = [\n \"HTMLReport\",\n \"get_clusters_table\",\n \"make_glm_report\",\n]\n", "path": "nilearn/reporting/__init__.py"}]}
812
166
gh_patches_debug_9225
rasdani/github-patches
git_diff
open-mmlab__mmpose-259
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pylint: W0707 ```bash mmpose/utils/hooks.py:37:20: W0707: Consider explicitly re-raising using the 'from' keyword (raise-missing-from) ``` </issue> <code> [start of mmpose/utils/hooks.py] 1 import functools 2 3 4 class OutputHook: 5 6 def __init__(self, module, outputs=None, as_tensor=False): 7 self.outputs = outputs 8 self.as_tensor = as_tensor 9 self.layer_outputs = {} 10 self.register(module) 11 12 def register(self, module): 13 14 def hook_wrapper(name): 15 16 def hook(model, input, output): 17 if self.as_tensor: 18 self.layer_outputs[name] = output 19 else: 20 if isinstance(output, list): 21 self.layer_outputs[name] = [ 22 out.detach().cpu().numpy() for out in output 23 ] 24 else: 25 self.layer_outputs[name] = output.detach().cpu().numpy( 26 ) 27 28 return hook 29 30 self.handles = [] 31 if isinstance(self.outputs, (list, tuple)): 32 for name in self.outputs: 33 try: 34 layer = rgetattr(module, name) 35 h = layer.register_forward_hook(hook_wrapper(name)) 36 except AttributeError: 37 raise AttributeError(f'Module {name} not found') 38 self.handles.append(h) 39 40 def remove(self): 41 for h in self.handles: 42 h.remove() 43 44 def __enter__(self): 45 return self 46 47 def __exit__(self, exc_type, exc_val, exc_tb): 48 self.remove() 49 50 51 # using wonder's beautiful simplification: 52 # https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects 53 def rgetattr(obj, attr, *args): 54 55 def _getattr(obj, attr): 56 return getattr(obj, attr, *args) 57 58 return functools.reduce(_getattr, [obj] + attr.split('.')) 59 [end of mmpose/utils/hooks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mmpose/utils/hooks.py b/mmpose/utils/hooks.py --- a/mmpose/utils/hooks.py +++ b/mmpose/utils/hooks.py @@ -33,8 +33,9 @@ try: layer = rgetattr(module, name) h = layer.register_forward_hook(hook_wrapper(name)) - except AttributeError: - raise AttributeError(f'Module {name} not found') + except ModuleNotFoundError as module_not_found: + raise ModuleNotFoundError( + f'Module {name} not found') from module_not_found self.handles.append(h) def remove(self):
{"golden_diff": "diff --git a/mmpose/utils/hooks.py b/mmpose/utils/hooks.py\n--- a/mmpose/utils/hooks.py\n+++ b/mmpose/utils/hooks.py\n@@ -33,8 +33,9 @@\n try:\n layer = rgetattr(module, name)\n h = layer.register_forward_hook(hook_wrapper(name))\n- except AttributeError:\n- raise AttributeError(f'Module {name} not found')\n+ except ModuleNotFoundError as module_not_found:\n+ raise ModuleNotFoundError(\n+ f'Module {name} not found') from module_not_found\n self.handles.append(h)\n \n def remove(self):\n", "issue": "Pylint: W0707\n```bash\r\nmmpose/utils/hooks.py:37:20: W0707: Consider explicitly re-raising using the 'from' keyword (raise-missing-from)\r\n```\n", "before_files": [{"content": "import functools\n\n\nclass OutputHook:\n\n def __init__(self, module, outputs=None, as_tensor=False):\n self.outputs = outputs\n self.as_tensor = as_tensor\n self.layer_outputs = {}\n self.register(module)\n\n def register(self, module):\n\n def hook_wrapper(name):\n\n def hook(model, input, output):\n if self.as_tensor:\n self.layer_outputs[name] = output\n else:\n if isinstance(output, list):\n self.layer_outputs[name] = [\n out.detach().cpu().numpy() for out in output\n ]\n else:\n self.layer_outputs[name] = output.detach().cpu().numpy(\n )\n\n return hook\n\n self.handles = []\n if isinstance(self.outputs, (list, tuple)):\n for name in self.outputs:\n try:\n layer = rgetattr(module, name)\n h = layer.register_forward_hook(hook_wrapper(name))\n except AttributeError:\n raise AttributeError(f'Module {name} not found')\n self.handles.append(h)\n\n def remove(self):\n for h in self.handles:\n h.remove()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.remove()\n\n\n# using wonder's beautiful simplification:\n# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects\ndef rgetattr(obj, attr, *args):\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))\n", "path": "mmpose/utils/hooks.py"}]}
1,050
136
gh_patches_debug_23405
rasdani/github-patches
git_diff
Theano__Theano-5045
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Theano deprecation warning I have started running my code with warnings as errors. Theano trips the deprecation warning: ``` File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/configparser.py", line 1209, in __init__ DeprecationWarning, stacklevel=2 DeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead. ``` Please use: ``` theano_cfg = (ConfigParser.ConfigParser if six.PY3 else ConfigParser.SafeConfigParser)(… ``` </issue> <code> [start of theano/misc/windows.py] 1 from __future__ import absolute_import, print_function, division 2 import os 3 import subprocess 4 5 6 def subprocess_Popen(command, **params): 7 """ 8 Utility function to work around windows behavior that open windows. 9 10 :see: call_subprocess_Popen and output_subprocess_Popen 11 """ 12 startupinfo = None 13 if os.name == 'nt': 14 startupinfo = subprocess.STARTUPINFO() 15 try: 16 startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW 17 except AttributeError: 18 startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW 19 20 # Anaconda for Windows does not always provide .exe files 21 # in the PATH, they also have .bat files that call the corresponding 22 # executable. For instance, "g++.bat" is in the PATH, not "g++.exe" 23 # Unless "shell=True", "g++.bat" is not executed when trying to 24 # execute "g++" without extensions. 25 # (Executing "g++.bat" explicitly would also work.) 26 params['shell'] = True 27 # "If shell is True, it is recommended to pass args as a string rather than as a sequence." (cite taken from https://docs.python.org/2/library/subprocess.html#frequently-used-arguments) 28 # In case when command arguments have spaces, passing a command as a list will result in incorrect arguments break down, and consequently 29 # in "The filename, directory name, or volume label syntax is incorrect" error message. 30 # Passing the command as a single string solves this problem. 31 if isinstance(command, list): 32 command = ' '.join(command) 33 34 # Using the dummy file descriptors below is a workaround for a 35 # crash experienced in an unusual Python 2.4.4 Windows environment 36 # with the default None values. 37 stdin = None 38 if "stdin" not in params: 39 stdin = open(os.devnull) 40 params['stdin'] = stdin.fileno() 41 42 try: 43 proc = subprocess.Popen(command, startupinfo=startupinfo, **params) 44 finally: 45 if stdin is not None: 46 del stdin 47 return proc 48 49 50 def call_subprocess_Popen(command, **params): 51 """ 52 Calls subprocess_Popen and discards the output, returning only the 53 exit code. 54 """ 55 if 'stdout' in params or 'stderr' in params: 56 raise TypeError("don't use stderr or stdout with call_subprocess_Popen") 57 with open(os.devnull, 'wb') as null: 58 # stdin to devnull is a workaround for a crash in a weird Windows 59 # environment where sys.stdin was None 60 params.setdefault('stdin', null) 61 params['stdout'] = null 62 params['stderr'] = null 63 p = subprocess_Popen(command, **params) 64 returncode = p.wait() 65 return returncode 66 67 68 def output_subprocess_Popen(command, **params): 69 """ 70 Calls subprocess_Popen, returning the output, error and exit code 71 in a tuple. 
72 """ 73 if 'stdout' in params or 'stderr' in params: 74 raise TypeError("don't use stderr or stdout with output_subprocess_Popen") 75 # stdin to devnull is a workaround for a crash in a weird Windows 76 # environement where sys.stdin was None 77 if not hasattr(params, 'stdin'): 78 null = open(os.devnull, 'wb') 79 params['stdin'] = null 80 params['stdout'] = subprocess.PIPE 81 params['stderr'] = subprocess.PIPE 82 p = subprocess_Popen(command, **params) 83 # we need to use communicate to make sure we don't deadlock around 84 # the stdour/stderr pipe. 85 out = p.communicate() 86 return out + (p.returncode,) 87 [end of theano/misc/windows.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/theano/misc/windows.py b/theano/misc/windows.py --- a/theano/misc/windows.py +++ b/theano/misc/windows.py @@ -43,7 +43,7 @@ proc = subprocess.Popen(command, startupinfo=startupinfo, **params) finally: if stdin is not None: - del stdin + stdin.close() return proc @@ -72,15 +72,10 @@ """ if 'stdout' in params or 'stderr' in params: raise TypeError("don't use stderr or stdout with output_subprocess_Popen") - # stdin to devnull is a workaround for a crash in a weird Windows - # environement where sys.stdin was None - if not hasattr(params, 'stdin'): - null = open(os.devnull, 'wb') - params['stdin'] = null params['stdout'] = subprocess.PIPE params['stderr'] = subprocess.PIPE p = subprocess_Popen(command, **params) # we need to use communicate to make sure we don't deadlock around - # the stdour/stderr pipe. + # the stdout/stderr pipe. out = p.communicate() return out + (p.returncode,)
{"golden_diff": "diff --git a/theano/misc/windows.py b/theano/misc/windows.py\n--- a/theano/misc/windows.py\n+++ b/theano/misc/windows.py\n@@ -43,7 +43,7 @@\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\n finally:\n if stdin is not None:\n- del stdin\n+ stdin.close()\n return proc\n \n \n@@ -72,15 +72,10 @@\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\n- # stdin to devnull is a workaround for a crash in a weird Windows\n- # environement where sys.stdin was None\n- if not hasattr(params, 'stdin'):\n- null = open(os.devnull, 'wb')\n- params['stdin'] = null\n params['stdout'] = subprocess.PIPE\n params['stderr'] = subprocess.PIPE\n p = subprocess_Popen(command, **params)\n # we need to use communicate to make sure we don't deadlock around\n- # the stdour/stderr pipe.\n+ # the stdout/stderr pipe.\n out = p.communicate()\n return out + (p.returncode,)\n", "issue": "Theano deprecation warning\nI have started running my code with warnings as errors. Theano trips the deprecation warning:\n\n```\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/configparser.py\", line 1209, in __init__\n DeprecationWarning, stacklevel=2\nDeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead.\n```\n\nPlease use:\n\n```\ntheano_cfg = (ConfigParser.ConfigParser if six.PY3 else ConfigParser.SafeConfigParser)(\u2026\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, division\nimport os\nimport subprocess\n\n\ndef subprocess_Popen(command, **params):\n \"\"\"\n Utility function to work around windows behavior that open windows.\n\n :see: call_subprocess_Popen and output_subprocess_Popen\n \"\"\"\n startupinfo = None\n if os.name == 'nt':\n startupinfo = subprocess.STARTUPINFO()\n try:\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n except AttributeError:\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\n\n # Anaconda for Windows does not always provide .exe files\n # in the PATH, they also have .bat files that call the corresponding\n # executable. 
For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\n # execute \"g++\" without extensions.\n # (Executing \"g++.bat\" explicitly would also work.)\n params['shell'] = True\n # \"If shell is True, it is recommended to pass args as a string rather than as a sequence.\" (cite taken from https://docs.python.org/2/library/subprocess.html#frequently-used-arguments)\n # In case when command arguments have spaces, passing a command as a list will result in incorrect arguments break down, and consequently\n # in \"The filename, directory name, or volume label syntax is incorrect\" error message.\n # Passing the command as a single string solves this problem.\n if isinstance(command, list):\n command = ' '.join(command)\n\n # Using the dummy file descriptors below is a workaround for a\n # crash experienced in an unusual Python 2.4.4 Windows environment\n # with the default None values.\n stdin = None\n if \"stdin\" not in params:\n stdin = open(os.devnull)\n params['stdin'] = stdin.fileno()\n\n try:\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\n finally:\n if stdin is not None:\n del stdin\n return proc\n\n\ndef call_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen and discards the output, returning only the\n exit code.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with call_subprocess_Popen\")\n with open(os.devnull, 'wb') as null:\n # stdin to devnull is a workaround for a crash in a weird Windows\n # environment where sys.stdin was None\n params.setdefault('stdin', null)\n params['stdout'] = null\n params['stderr'] = null\n p = subprocess_Popen(command, **params)\n returncode = p.wait()\n return returncode\n\n\ndef output_subprocess_Popen(command, **params):\n \"\"\"\n Calls subprocess_Popen, returning the output, error and exit code\n in a tuple.\n \"\"\"\n if 'stdout' in params or 'stderr' in params:\n raise TypeError(\"don't use stderr or stdout with output_subprocess_Popen\")\n # stdin to devnull is a workaround for a crash in a weird Windows\n # environement where sys.stdin was None\n if not hasattr(params, 'stdin'):\n null = open(os.devnull, 'wb')\n params['stdin'] = null\n params['stdout'] = subprocess.PIPE\n params['stderr'] = subprocess.PIPE\n p = subprocess_Popen(command, **params)\n # we need to use communicate to make sure we don't deadlock around\n # the stdour/stderr pipe.\n out = p.communicate()\n return out + (p.returncode,)\n", "path": "theano/misc/windows.py"}]}
1,649
273
gh_patches_debug_35090
rasdani/github-patches
git_diff
chainer__chainer-1191
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Inconsistency of type checking between SoftmaxCrossEntropy and Accuracy We often evaluate training procedure with both `softmax_cross_entropy` and `accuracy`, as default `chainer.links.Classifier` does. But there are cases `softmax_cross_entorpy(x, t)` is accepted but `accuracy(x, t)` raises error because of its type checking (and vise versa). Replicated softmax is one of a such examples (e.g. `x.shape = (B, C, N)` and `t.shape = (B, N)`). We need to reshape them manually in such a case. It is convenient for users that shape checking of the two be more consistent. </issue> <code> [start of chainer/functions/evaluation/accuracy.py] 1 import numpy 2 3 from chainer import cuda 4 from chainer import function 5 from chainer.utils import type_check 6 7 8 class Accuracy(function.Function): 9 10 def __init__(self, ignore_label=None): 11 self.ignore_label = ignore_label 12 13 def check_type_forward(self, in_types): 14 type_check.expect(in_types.size() == 2) 15 x_type, t_type = in_types 16 17 type_check.expect( 18 x_type.dtype.kind == 'f', 19 x_type.ndim >= 2, 20 t_type.dtype == numpy.int32, 21 t_type.ndim == 1, 22 t_type.shape[0] == x_type.shape[0], 23 ) 24 for i in range(2, x_type.ndim.eval()): 25 type_check.expect(x_type.shape[i] == 1) 26 27 def forward(self, inputs): 28 xp = cuda.get_array_module(*inputs) 29 y, t = inputs 30 y = y.reshape(len(y), -1) # flatten 31 32 if self.ignore_label is not None: 33 mask = (t == self.ignore_label) 34 ignore_cnt = mask.sum() 35 36 # will always be true when the true label is ignore_label 37 # TODO(henry0312) 38 # If cupy.where returns indexes, we could make the code better. 39 # Also, we would need Advanced Indexing. 40 pred = xp.where(mask, self.ignore_label, y.argmax(axis=1)) 41 count = (pred == t).sum() - ignore_cnt 42 total = len(t) - ignore_cnt 43 44 if total == 0: 45 return xp.asarray(0.0, dtype=y.dtype), 46 else: 47 return xp.asarray(float(count) / total, dtype=y.dtype), 48 else: 49 pred = y.argmax(axis=1) 50 return xp.asarray((pred == t).mean(dtype=y.dtype)), 51 52 53 def accuracy(y, t, ignore_label=None): 54 """Computes muticlass classification accuracy of the minibatch. 55 56 Args: 57 y (Variable): Variable holding a matrix whose (i, j)-th element 58 indicates the score of the class j at the i-th example. 59 t (Variable): Variable holding an int32 vector of ground truth labels. 60 ignore_label (int or None): Skip calculating accuracy 61 if the ture label is ``ignore_label``. 62 63 Returns: 64 Variable: A variable holding a scalar array of the accuracy. 65 66 .. note:: This function is non-differentiable. 67 68 """ 69 return Accuracy(ignore_label=ignore_label)(y, t) 70 [end of chainer/functions/evaluation/accuracy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/evaluation/accuracy.py b/chainer/functions/evaluation/accuracy.py --- a/chainer/functions/evaluation/accuracy.py +++ b/chainer/functions/evaluation/accuracy.py @@ -1,4 +1,5 @@ import numpy +import six from chainer import cuda from chainer import function @@ -16,18 +17,21 @@ type_check.expect( x_type.dtype.kind == 'f', - x_type.ndim >= 2, - t_type.dtype == numpy.int32, - t_type.ndim == 1, - t_type.shape[0] == x_type.shape[0], + t_type.dtype == numpy.int32 ) - for i in range(2, x_type.ndim.eval()): + + t_ndim = t_type.ndim.eval() + type_check.expect( + x_type.ndim >= t_type.ndim, + x_type.shape[0] == t_type.shape[0], + x_type.shape[2: t_ndim + 1] == t_type.shape[1:] + ) + for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()): type_check.expect(x_type.shape[i] == 1) def forward(self, inputs): xp = cuda.get_array_module(*inputs) y, t = inputs - y = y.reshape(len(y), -1) # flatten if self.ignore_label is not None: mask = (t == self.ignore_label) @@ -37,16 +41,17 @@ # TODO(henry0312) # If cupy.where returns indexes, we could make the code better. # Also, we would need Advanced Indexing. - pred = xp.where(mask, self.ignore_label, y.argmax(axis=1)) + pred = xp.where(mask, self.ignore_label, + y.argmax(axis=1).reshape(t.shape)) count = (pred == t).sum() - ignore_cnt - total = len(t) - ignore_cnt + total = t.size - ignore_cnt if total == 0: return xp.asarray(0.0, dtype=y.dtype), else: return xp.asarray(float(count) / total, dtype=y.dtype), else: - pred = y.argmax(axis=1) + pred = y.argmax(axis=1).reshape(t.shape) return xp.asarray((pred == t).mean(dtype=y.dtype)),
{"golden_diff": "diff --git a/chainer/functions/evaluation/accuracy.py b/chainer/functions/evaluation/accuracy.py\n--- a/chainer/functions/evaluation/accuracy.py\n+++ b/chainer/functions/evaluation/accuracy.py\n@@ -1,4 +1,5 @@\n import numpy\n+import six\n \n from chainer import cuda\n from chainer import function\n@@ -16,18 +17,21 @@\n \n type_check.expect(\n x_type.dtype.kind == 'f',\n- x_type.ndim >= 2,\n- t_type.dtype == numpy.int32,\n- t_type.ndim == 1,\n- t_type.shape[0] == x_type.shape[0],\n+ t_type.dtype == numpy.int32\n )\n- for i in range(2, x_type.ndim.eval()):\n+\n+ t_ndim = t_type.ndim.eval()\n+ type_check.expect(\n+ x_type.ndim >= t_type.ndim,\n+ x_type.shape[0] == t_type.shape[0],\n+ x_type.shape[2: t_ndim + 1] == t_type.shape[1:]\n+ )\n+ for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):\n type_check.expect(x_type.shape[i] == 1)\n \n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n y, t = inputs\n- y = y.reshape(len(y), -1) # flatten\n \n if self.ignore_label is not None:\n mask = (t == self.ignore_label)\n@@ -37,16 +41,17 @@\n # TODO(henry0312)\n # If cupy.where returns indexes, we could make the code better.\n # Also, we would need Advanced Indexing.\n- pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))\n+ pred = xp.where(mask, self.ignore_label,\n+ y.argmax(axis=1).reshape(t.shape))\n count = (pred == t).sum() - ignore_cnt\n- total = len(t) - ignore_cnt\n+ total = t.size - ignore_cnt\n \n if total == 0:\n return xp.asarray(0.0, dtype=y.dtype),\n else:\n return xp.asarray(float(count) / total, dtype=y.dtype),\n else:\n- pred = y.argmax(axis=1)\n+ pred = y.argmax(axis=1).reshape(t.shape)\n return xp.asarray((pred == t).mean(dtype=y.dtype)),\n", "issue": "Inconsistency of type checking between SoftmaxCrossEntropy and Accuracy\nWe often evaluate training procedure with both `softmax_cross_entropy` and `accuracy`, as default `chainer.links.Classifier` does. But there are cases `softmax_cross_entorpy(x, t)` is accepted but `accuracy(x, t)` raises error because of its type checking (and vise versa). Replicated softmax is one of a such examples (e.g. `x.shape = (B, C, N)` and `t.shape = (B, N)`). 
We need to reshape them manually in such a case.\n\nIt is convenient for users that shape checking of the two be more consistent.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Accuracy(function.Function):\n\n def __init__(self, ignore_label=None):\n self.ignore_label = ignore_label\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim >= 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n t_type.shape[0] == x_type.shape[0],\n )\n for i in range(2, x_type.ndim.eval()):\n type_check.expect(x_type.shape[i] == 1)\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n y, t = inputs\n y = y.reshape(len(y), -1) # flatten\n\n if self.ignore_label is not None:\n mask = (t == self.ignore_label)\n ignore_cnt = mask.sum()\n\n # will always be true when the true label is ignore_label\n # TODO(henry0312)\n # If cupy.where returns indexes, we could make the code better.\n # Also, we would need Advanced Indexing.\n pred = xp.where(mask, self.ignore_label, y.argmax(axis=1))\n count = (pred == t).sum() - ignore_cnt\n total = len(t) - ignore_cnt\n\n if total == 0:\n return xp.asarray(0.0, dtype=y.dtype),\n else:\n return xp.asarray(float(count) / total, dtype=y.dtype),\n else:\n pred = y.argmax(axis=1)\n return xp.asarray((pred == t).mean(dtype=y.dtype)),\n\n\ndef accuracy(y, t, ignore_label=None):\n \"\"\"Computes muticlass classification accuracy of the minibatch.\n\n Args:\n y (Variable): Variable holding a matrix whose (i, j)-th element\n indicates the score of the class j at the i-th example.\n t (Variable): Variable holding an int32 vector of ground truth labels.\n ignore_label (int or None): Skip calculating accuracy\n if the ture label is ``ignore_label``.\n\n Returns:\n Variable: A variable holding a scalar array of the accuracy.\n\n .. note:: This function is non-differentiable.\n\n \"\"\"\n return Accuracy(ignore_label=ignore_label)(y, t)\n", "path": "chainer/functions/evaluation/accuracy.py"}]}
1,353
544
gh_patches_debug_16874
rasdani/github-patches
git_diff
mdn__kuma-6423
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use of access tokens as query parameters in the GitHub OAuth API is deprecated **Summary** We use GitHub's OAuth API as one of MDN's sign-up/sign-in OAuth providers. We're starting to receive emails from GitHub that using the https://api.github.com/user API with the access token as a query parameter has been deprecated and that the `Authorization` header should be used instead. This occurs within `GitHubOAuth2Adapter.complete_login` method provided by `django-allauth`, but `django-allauth` has not yet fixed this (although a PR has been submitted that does -- see https://github.com/pennersr/django-allauth/pull/2458). Even if `django-allauth` fixes the issue, it wouldn't help in our case since we override this method (https://github.com/mdn/kuma/blob/266bd9d8ebf24c950037a1965b1967022fca233f/kuma/users/providers/github/views.py#L20). We need to update our overridden method to pass the token via the `Authorization` header rather than via a query parameter. **Rationale** We no longer have to concern ourselves with using a deprecated approach. **Audience** All users who sign-up/sign-in to MDN via GitHub. </issue> <code> [start of kuma/users/providers/github/views.py] 1 from allauth.account.utils import get_next_redirect_url 2 from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter 3 from allauth.socialaccount.providers.oauth2.views import (OAuth2CallbackView, 4 OAuth2LoginView) 5 6 from kuma.core.decorators import redirect_in_maintenance_mode 7 from kuma.core.urlresolvers import reverse 8 from kuma.core.utils import requests_retry_session 9 10 11 class KumaGitHubOAuth2Adapter(GitHubOAuth2Adapter): 12 """ 13 A custom GitHub OAuth adapter to be used for fetching the list 14 of private email addresses stored for the given user at GitHub. 15 16 We store those email addresses in the extra data of each account. 17 """ 18 email_url = 'https://api.github.com/user/emails' 19 20 def complete_login(self, request, app, token, **kwargs): 21 session = requests_retry_session() 22 params = {'access_token': token.token} 23 profile_data = session.get(self.profile_url, params=params) 24 profile_data.raise_for_status() 25 extra_data = profile_data.json() 26 email_data = session.get(self.email_url, params=params) 27 email_data.raise_for_status() 28 extra_data['email_addresses'] = email_data.json() 29 return self.get_provider().sociallogin_from_response(request, 30 extra_data) 31 32 33 class KumaOAuth2LoginView(OAuth2LoginView): 34 35 def dispatch(self, request): 36 next_url = (get_next_redirect_url(request) or 37 reverse('users.my_edit_page')) 38 request.session['sociallogin_next_url'] = next_url 39 request.session.modified = True 40 return super(KumaOAuth2LoginView, self).dispatch(request) 41 42 43 oauth2_login = redirect_in_maintenance_mode( 44 KumaOAuth2LoginView.adapter_view(KumaGitHubOAuth2Adapter) 45 ) 46 oauth2_callback = redirect_in_maintenance_mode( 47 OAuth2CallbackView.adapter_view(KumaGitHubOAuth2Adapter) 48 ) 49 [end of kuma/users/providers/github/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/users/providers/github/views.py b/kuma/users/providers/github/views.py --- a/kuma/users/providers/github/views.py +++ b/kuma/users/providers/github/views.py @@ -19,11 +19,11 @@ def complete_login(self, request, app, token, **kwargs): session = requests_retry_session() - params = {'access_token': token.token} - profile_data = session.get(self.profile_url, params=params) + headers = {'Authorization': f'token {token.token}'} + profile_data = session.get(self.profile_url, headers=headers) profile_data.raise_for_status() extra_data = profile_data.json() - email_data = session.get(self.email_url, params=params) + email_data = session.get(self.email_url, headers=headers) email_data.raise_for_status() extra_data['email_addresses'] = email_data.json() return self.get_provider().sociallogin_from_response(request,
{"golden_diff": "diff --git a/kuma/users/providers/github/views.py b/kuma/users/providers/github/views.py\n--- a/kuma/users/providers/github/views.py\n+++ b/kuma/users/providers/github/views.py\n@@ -19,11 +19,11 @@\n \n def complete_login(self, request, app, token, **kwargs):\n session = requests_retry_session()\n- params = {'access_token': token.token}\n- profile_data = session.get(self.profile_url, params=params)\n+ headers = {'Authorization': f'token {token.token}'}\n+ profile_data = session.get(self.profile_url, headers=headers)\n profile_data.raise_for_status()\n extra_data = profile_data.json()\n- email_data = session.get(self.email_url, params=params)\n+ email_data = session.get(self.email_url, headers=headers)\n email_data.raise_for_status()\n extra_data['email_addresses'] = email_data.json()\n return self.get_provider().sociallogin_from_response(request,\n", "issue": "Use of access tokens as query parameters in the GitHub OAuth API is deprecated\n**Summary**\r\nWe use GitHub's OAuth API as one of MDN's sign-up/sign-in OAuth providers. We're starting to receive emails from GitHub that using the https://api.github.com/user API with the access token as a query parameter has been deprecated and that the `Authorization` header should be used instead. This occurs within `GitHubOAuth2Adapter.complete_login` method provided by `django-allauth`, but `django-allauth` has not yet fixed this (although a PR has been submitted that does -- see https://github.com/pennersr/django-allauth/pull/2458). Even if `django-allauth` fixes the issue, it wouldn't help in our case since we override this method (https://github.com/mdn/kuma/blob/266bd9d8ebf24c950037a1965b1967022fca233f/kuma/users/providers/github/views.py#L20). We need to update our overridden method to pass the token via the `Authorization` header rather than via a query parameter.\r\n\r\n**Rationale**\r\nWe no longer have to concern ourselves with using a deprecated approach.\r\n\r\n**Audience**\r\nAll users who sign-up/sign-in to MDN via GitHub.\r\n\n", "before_files": [{"content": "from allauth.account.utils import get_next_redirect_url\nfrom allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter\nfrom allauth.socialaccount.providers.oauth2.views import (OAuth2CallbackView,\n OAuth2LoginView)\n\nfrom kuma.core.decorators import redirect_in_maintenance_mode\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import requests_retry_session\n\n\nclass KumaGitHubOAuth2Adapter(GitHubOAuth2Adapter):\n \"\"\"\n A custom GitHub OAuth adapter to be used for fetching the list\n of private email addresses stored for the given user at GitHub.\n\n We store those email addresses in the extra data of each account.\n \"\"\"\n email_url = 'https://api.github.com/user/emails'\n\n def complete_login(self, request, app, token, **kwargs):\n session = requests_retry_session()\n params = {'access_token': token.token}\n profile_data = session.get(self.profile_url, params=params)\n profile_data.raise_for_status()\n extra_data = profile_data.json()\n email_data = session.get(self.email_url, params=params)\n email_data.raise_for_status()\n extra_data['email_addresses'] = email_data.json()\n return self.get_provider().sociallogin_from_response(request,\n extra_data)\n\n\nclass KumaOAuth2LoginView(OAuth2LoginView):\n\n def dispatch(self, request):\n next_url = (get_next_redirect_url(request) or\n reverse('users.my_edit_page'))\n request.session['sociallogin_next_url'] = next_url\n request.session.modified = True\n return 
super(KumaOAuth2LoginView, self).dispatch(request)\n\n\noauth2_login = redirect_in_maintenance_mode(\n KumaOAuth2LoginView.adapter_view(KumaGitHubOAuth2Adapter)\n)\noauth2_callback = redirect_in_maintenance_mode(\n OAuth2CallbackView.adapter_view(KumaGitHubOAuth2Adapter)\n)\n", "path": "kuma/users/providers/github/views.py"}]}
1,309
206
gh_patches_debug_59499
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-274
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make test_tube optional **Is your feature request related to a problem? Please describe.** `test_tube` is only needed when using `TestTubeLogger`, thus it should not be an install requirement like `mlflow`. </issue> <code> [start of pytorch_lightning/logging/__init__.py] 1 from .base import LightningLoggerBase, rank_zero_only 2 from .test_tube_logger import TestTubeLogger 3 4 try: 5 from .mlflow_logger import MLFlowLogger 6 except ModuleNotFoundError: 7 pass 8 [end of pytorch_lightning/logging/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py --- a/pytorch_lightning/logging/__init__.py +++ b/pytorch_lightning/logging/__init__.py @@ -1,6 +1,9 @@ from .base import LightningLoggerBase, rank_zero_only -from .test_tube_logger import TestTubeLogger +try: + from .test_tube_logger import TestTubeLogger +except ModuleNotFoundError: + pass try: from .mlflow_logger import MLFlowLogger except ModuleNotFoundError:
{"golden_diff": "diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py\n--- a/pytorch_lightning/logging/__init__.py\n+++ b/pytorch_lightning/logging/__init__.py\n@@ -1,6 +1,9 @@\n from .base import LightningLoggerBase, rank_zero_only\n-from .test_tube_logger import TestTubeLogger\n \n+try:\n+ from .test_tube_logger import TestTubeLogger\n+except ModuleNotFoundError:\n+ pass\n try:\n from .mlflow_logger import MLFlowLogger\n except ModuleNotFoundError:\n", "issue": "Make test_tube optional\n**Is your feature request related to a problem? Please describe.**\r\n`test_tube` is only needed when using `TestTubeLogger`, thus it should not be an install requirement like `mlflow`.\n", "before_files": [{"content": "from .base import LightningLoggerBase, rank_zero_only\nfrom .test_tube_logger import TestTubeLogger\n\ntry:\n from .mlflow_logger import MLFlowLogger\nexcept ModuleNotFoundError:\n pass\n", "path": "pytorch_lightning/logging/__init__.py"}]}
644
124
gh_patches_debug_31852
rasdani/github-patches
git_diff
spacetelescope__jwql-63
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Build filename parser utility function It would be useful to have a function in the `utils.py` module that returned the individual elements of a given filename, for example: ```python from jwql.utils.utils import parse_filename filename_dict = parse_filename('jw94015001001_02102_00001_nrcb1_uncal.fits') ``` where `filename_dict` is: ``` { 'program_id' : '94015', 'observation' : '001', 'visit' : '001', 'visit_group' : '02', 'parallel_seq_id' : '1', 'activity' : '02', 'exposure_id' : '00001', 'detector' : 'nrcb1', 'suffix' : 'uncal' } ``` </issue> <code> [start of jwql/utils/utils.py] 1 """Various utility functions for the jwql project. 2 3 Authors 4 ------- 5 6 Matthew Bourque 7 8 Use 9 --- 10 11 This module can be imported as such: 12 13 >>> import utils 14 settings = get_config() 15 """ 16 17 import json 18 19 20 def get_config(): 21 """Return a dictionary that holds the contents of the jwql config 22 file. 23 24 Returns 25 ------- 26 settings : dict 27 A dictionary that holds the contents of the config file. 28 """ 29 30 with open('config.json', 'r') as config_file: 31 settings = json.load(config_file) 32 33 return settings 34 [end of jwql/utils/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py --- a/jwql/utils/utils.py +++ b/jwql/utils/utils.py @@ -3,7 +3,8 @@ Authors ------- - Matthew Bourque + - Matthew Bourque + - Lauren Chambers Use --- @@ -12,9 +13,19 @@ >>> import utils settings = get_config() + +References +---------- + + Filename parser modifed from Joe Hunkeler: + https://gist.github.com/jhunkeler/f08783ca2da7bfd1f8e9ee1d207da5ff """ import json +import os +import re + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) def get_config(): @@ -27,7 +38,50 @@ A dictionary that holds the contents of the config file. """ - with open('config.json', 'r') as config_file: + with open(os.path.join(__location__, 'config.json'), 'r') as config_file: settings = json.load(config_file) return settings + + +def filename_parser(filename): + """Return a dictionary that contains the properties of a given + JWST file (e.g. program ID, visit number, detector, etc.) + + Parameters + ---------- + filename : str + Path or name of JWST file to parse + + Returns + ------- + filename_dict : dict + Collection of file properties + + Raises + ------ + ValueError + When the provided file does not follow naming conventions + """ + filename = os.path.basename(filename) + + elements = \ + re.compile(r"[a-z]+" + "(?P<program_id>\d{5})" + "(?P<observation>\d{3})" + "(?P<visit>\d{3})" + "_(?P<visit_group>\d{2})" + "(?P<parallel_seq_id>\d{1})" + "(?P<activity>\d{2})" + "_(?P<exposure_id>\d+)" + "_(?P<detector>\w+)" + "_(?P<suffix>\w+).*") + + jwst_file = elements.match(filename) + + if jwst_file is not None: + filename_dict = jwst_file.groupdict() + else: + raise ValueError('Provided file {} does not follow JWST naming conventions (jw<PPPPP><OOO><VVV>_<GGSAA>_<EEEEE>_<detector>_<suffix>.fits)'.format(filename)) + + return filename_dict
{"golden_diff": "diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py\n--- a/jwql/utils/utils.py\n+++ b/jwql/utils/utils.py\n@@ -3,7 +3,8 @@\n Authors\n -------\n \n- Matthew Bourque\n+ - Matthew Bourque\n+ - Lauren Chambers\n \n Use\n ---\n@@ -12,9 +13,19 @@\n \n >>> import utils\n settings = get_config()\n+\n+References\n+----------\n+\n+ Filename parser modifed from Joe Hunkeler:\n+ https://gist.github.com/jhunkeler/f08783ca2da7bfd1f8e9ee1d207da5ff\n \"\"\"\n \n import json\n+import os\n+import re\n+\n+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n \n def get_config():\n@@ -27,7 +38,50 @@\n A dictionary that holds the contents of the config file.\n \"\"\"\n \n- with open('config.json', 'r') as config_file:\n+ with open(os.path.join(__location__, 'config.json'), 'r') as config_file:\n settings = json.load(config_file)\n \n return settings\n+\n+\n+def filename_parser(filename):\n+ \"\"\"Return a dictionary that contains the properties of a given\n+ JWST file (e.g. program ID, visit number, detector, etc.)\n+\n+ Parameters\n+ ----------\n+ filename : str\n+ Path or name of JWST file to parse\n+\n+ Returns\n+ -------\n+ filename_dict : dict\n+ Collection of file properties\n+\n+ Raises\n+ ------\n+ ValueError\n+ When the provided file does not follow naming conventions\n+ \"\"\"\n+ filename = os.path.basename(filename)\n+\n+ elements = \\\n+ re.compile(r\"[a-z]+\"\n+ \"(?P<program_id>\\d{5})\"\n+ \"(?P<observation>\\d{3})\"\n+ \"(?P<visit>\\d{3})\"\n+ \"_(?P<visit_group>\\d{2})\"\n+ \"(?P<parallel_seq_id>\\d{1})\"\n+ \"(?P<activity>\\d{2})\"\n+ \"_(?P<exposure_id>\\d+)\"\n+ \"_(?P<detector>\\w+)\"\n+ \"_(?P<suffix>\\w+).*\")\n+\n+ jwst_file = elements.match(filename)\n+\n+ if jwst_file is not None:\n+ filename_dict = jwst_file.groupdict()\n+ else:\n+ raise ValueError('Provided file {} does not follow JWST naming conventions (jw<PPPPP><OOO><VVV>_<GGSAA>_<EEEEE>_<detector>_<suffix>.fits)'.format(filename))\n+\n+ return filename_dict\n", "issue": "Build filename parser utility function\nIt would be useful to have a function in the `utils.py` module that returned the individual elements of a given filename, for example:\r\n\r\n```python\r\nfrom jwql.utils.utils import parse_filename\r\nfilename_dict = parse_filename('jw94015001001_02102_00001_nrcb1_uncal.fits')\r\n```\r\n\r\nwhere `filename_dict` is:\r\n\r\n```\r\n{\r\n 'program_id' : '94015',\r\n 'observation' : '001',\r\n 'visit' : '001',\r\n 'visit_group' : '02',\r\n 'parallel_seq_id' : '1',\r\n 'activity' : '02',\r\n 'exposure_id' : '00001',\r\n 'detector' : 'nrcb1',\r\n 'suffix' : 'uncal'\r\n}\r\n```\n", "before_files": [{"content": "\"\"\"Various utility functions for the jwql project.\n\nAuthors\n-------\n\n Matthew Bourque\n\nUse\n---\n\n This module can be imported as such:\n\n >>> import utils\n settings = get_config()\n\"\"\"\n\nimport json\n\n\ndef get_config():\n \"\"\"Return a dictionary that holds the contents of the jwql config\n file.\n\n Returns\n -------\n settings : dict\n A dictionary that holds the contents of the config file.\n \"\"\"\n\n with open('config.json', 'r') as config_file:\n settings = json.load(config_file)\n\n return settings\n", "path": "jwql/utils/utils.py"}]}
924
627
gh_patches_debug_20806
rasdani/github-patches
git_diff
pwndbg__pwndbg-416
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> utf8 decoding error ### Description Context is not correctly displayed due to utf8 decoding error. Traceback (most recent call last): File "/home/carstein/Tools/pwndbg/pwndbg/commands/__init__.py", line 109, in __call__ return self.function(*args, **kwargs) File "/home/carstein/Tools/pwndbg/pwndbg/commands/__init__.py", line 200, in _OnlyWhenRunning return function(*a, **kw) File "/home/carstein/Tools/pwndbg/pwndbg/commands/context.py", line 86, in context result.extend(func()) File "/home/carstein/Tools/pwndbg/pwndbg/commands/context.py", line 98, in context_regs return [pwndbg.ui.banner("registers")] + get_regs() File "/home/carstein/Tools/pwndbg/pwndbg/ui.py", line 52, in banner banner = ljust_colored(banner, width, str(config.banner_separator)) File "/home/carstein/Tools/pwndbg/pwndbg/color/__init__.py", line 77, in ljust_colored return x + ((remaining // len(char) + 1) * char)[:remaining] UnicodeDecodeError: 'utf8' codec can't decode byte 0xe2 in position 105: unexpected end of data ### Steps to reproduce 1. Load binary into gdb 2. `break main` 3. `run` ### My setup pwndbg version: Gdb: GNU gdb (GDB) 8.1 Python: 2.7.13 (default, Nov 24 2017, 17:33:09) [GCC 6.3.0 20170516] Pwndbg: 1.0.0 build: 869e832 Capstone: 4.0.1024 Unicorn: 1.0.1 </issue> <code> [start of pwndbg/color/__init__.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import re 9 10 import pwndbg.memoize 11 12 NORMAL = "\x1b[0m" 13 BLACK = "\x1b[30m" 14 RED = "\x1b[31m" 15 GREEN = "\x1b[32m" 16 YELLOW = "\x1b[33m" 17 BLUE = "\x1b[34m" 18 PURPLE = "\x1b[35m" 19 CYAN = "\x1b[36m" 20 LIGHT_GREY = LIGHT_GRAY = "\x1b[37m" 21 FOREGROUND = "\x1b[39m" 22 GREY = GRAY = "\x1b[90m" 23 LIGHT_RED = "\x1b[91m" 24 LIGHT_GREEN = "\x1b[92m" 25 LIGHT_YELLOW = "\x1b[93m" 26 LIGHT_BLUE = "\x1b[94m" 27 LIGHT_PURPLE = "\x1b[95m" 28 LIGHT_CYAN = "\x1b[96m" 29 WHITE = "\x1b[97m" 30 BOLD = "\x1b[1m" 31 UNDERLINE = "\x1b[4m" 32 33 def none(x): return str(x) 34 def normal(x): return colorize(x, NORMAL) 35 def black(x): return colorize(x, BLACK) 36 def red(x): return colorize(x, RED) 37 def green(x): return colorize(x, GREEN) 38 def yellow(x): return colorize(x, YELLOW) 39 def blue(x): return colorize(x, BLUE) 40 def purple(x): return colorize(x, PURPLE) 41 def cyan(x): return colorize(x, CYAN) 42 def light_gray(x): return colorize(x, LIGHT_GRAY) 43 def foreground(x): return colorize(x, FOREGROUND) 44 def gray(x): return colorize(x, GRAY) 45 def light_red(x): return colorize(x, LIGHT_RED) 46 def light_green(x): return colorize(x, LIGHT_GREEN) 47 def light_yellow(x): return colorize(x, LIGHT_YELLOW) 48 def light_blue(x): return colorize(x, LIGHT_BLUE) 49 def light_purple(x): return colorize(x, LIGHT_PURPLE) 50 def light_cyan(x): return colorize(x, LIGHT_CYAN) 51 def white(x): return colorize(x, WHITE) 52 def bold(x): return colorize(x, BOLD) 53 def underline(x): return colorize(x, UNDERLINE) 54 def colorize(x, color): return color + terminateWith(str(x), color) + NORMAL 55 56 @pwndbg.memoize.reset_on_stop 57 def generateColorFunctionInner(old, new): 58 def wrapper(text): 59 return new(old(text)) 60 return wrapper 61 62 def generateColorFunction(config): 63 function = lambda x: x 64 for color in str(config).split(','): 65 function = generateColorFunctionInner(function, 
globals()[color.lower().replace('-', '_')]) 66 return function 67 68 def strip(x): 69 return re.sub('\x1b\\[\d+m', '', x) 70 71 def terminateWith(x, color): 72 return re.sub('\x1b\\[0m', NORMAL + color, x) 73 74 def ljust_colored(x, length, char=' '): 75 remaining = length - len(strip(x)) 76 return x + ((remaining // len(char) + 1) * char)[:remaining] 77 78 def rjust_colored(x, length, char=' '): 79 remaining = length - len(strip(x)) 80 return ((remaining // len(char) + 1) * char)[:remaining] + x 81 [end of pwndbg/color/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/color/__init__.py b/pwndbg/color/__init__.py --- a/pwndbg/color/__init__.py +++ b/pwndbg/color/__init__.py @@ -7,6 +7,8 @@ import re +import six + import pwndbg.memoize NORMAL = "\x1b[0m" @@ -72,9 +74,17 @@ return re.sub('\x1b\\[0m', NORMAL + color, x) def ljust_colored(x, length, char=' '): + # TODO: workaround until issue #404 + if six.PY2: + x = x if isinstance(x, six.text_type) else x.decode('utf8') + char = char if isinstance(char, six.text_type) else char.decode('utf8') remaining = length - len(strip(x)) return x + ((remaining // len(char) + 1) * char)[:remaining] def rjust_colored(x, length, char=' '): + # TODO: workaround until issue #404 + if six.PY2: + x = x if isinstance(x, six.text_type) else x.decode('utf8') + char = char if isinstance(char, six.text_type) else char.decode('utf8') remaining = length - len(strip(x)) return ((remaining // len(char) + 1) * char)[:remaining] + x
{"golden_diff": "diff --git a/pwndbg/color/__init__.py b/pwndbg/color/__init__.py\n--- a/pwndbg/color/__init__.py\n+++ b/pwndbg/color/__init__.py\n@@ -7,6 +7,8 @@\n \n import re\n \n+import six\n+\n import pwndbg.memoize\n \n NORMAL = \"\\x1b[0m\"\n@@ -72,9 +74,17 @@\n return re.sub('\\x1b\\\\[0m', NORMAL + color, x)\n \n def ljust_colored(x, length, char=' '):\n+ # TODO: workaround until issue #404\n+ if six.PY2:\n+ x = x if isinstance(x, six.text_type) else x.decode('utf8')\n+ char = char if isinstance(char, six.text_type) else char.decode('utf8')\n remaining = length - len(strip(x))\n return x + ((remaining // len(char) + 1) * char)[:remaining]\n \n def rjust_colored(x, length, char=' '):\n+ # TODO: workaround until issue #404\n+ if six.PY2:\n+ x = x if isinstance(x, six.text_type) else x.decode('utf8')\n+ char = char if isinstance(char, six.text_type) else char.decode('utf8')\n remaining = length - len(strip(x))\n return ((remaining // len(char) + 1) * char)[:remaining] + x\n", "issue": "utf8 decoding error\n### Description\r\nContext is not correctly displayed due to utf8 decoding error.\r\n\r\nTraceback (most recent call last):\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/commands/__init__.py\", line 109, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/commands/__init__.py\", line 200, in _OnlyWhenRunning\r\n return function(*a, **kw)\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/commands/context.py\", line 86, in context\r\n result.extend(func())\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/commands/context.py\", line 98, in context_regs\r\n return [pwndbg.ui.banner(\"registers\")] + get_regs()\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/ui.py\", line 52, in banner\r\n banner = ljust_colored(banner, width, str(config.banner_separator))\r\n File \"/home/carstein/Tools/pwndbg/pwndbg/color/__init__.py\", line 77, in ljust_colored\r\n return x + ((remaining // len(char) + 1) * char)[:remaining]\r\nUnicodeDecodeError: 'utf8' codec can't decode byte 0xe2 in position 105: unexpected end of data\r\n\r\n\r\n### Steps to reproduce\r\n1. Load binary into gdb\r\n2. `break main`\r\n3. 
`run`\r\n\r\n\r\n### My setup\r\npwndbg version: \r\nGdb: GNU gdb (GDB) 8.1\r\nPython: 2.7.13 (default, Nov 24 2017, 17:33:09) [GCC 6.3.0 20170516]\r\nPwndbg: 1.0.0 build: 869e832\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.1\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport re\n\nimport pwndbg.memoize\n\nNORMAL = \"\\x1b[0m\"\nBLACK = \"\\x1b[30m\"\nRED = \"\\x1b[31m\"\nGREEN = \"\\x1b[32m\"\nYELLOW = \"\\x1b[33m\"\nBLUE = \"\\x1b[34m\"\nPURPLE = \"\\x1b[35m\"\nCYAN = \"\\x1b[36m\"\nLIGHT_GREY = LIGHT_GRAY = \"\\x1b[37m\"\nFOREGROUND = \"\\x1b[39m\"\nGREY = GRAY = \"\\x1b[90m\"\nLIGHT_RED = \"\\x1b[91m\"\nLIGHT_GREEN = \"\\x1b[92m\"\nLIGHT_YELLOW = \"\\x1b[93m\"\nLIGHT_BLUE = \"\\x1b[94m\"\nLIGHT_PURPLE = \"\\x1b[95m\"\nLIGHT_CYAN = \"\\x1b[96m\"\nWHITE = \"\\x1b[97m\"\nBOLD = \"\\x1b[1m\"\nUNDERLINE = \"\\x1b[4m\"\n\ndef none(x): return str(x)\ndef normal(x): return colorize(x, NORMAL)\ndef black(x): return colorize(x, BLACK)\ndef red(x): return colorize(x, RED)\ndef green(x): return colorize(x, GREEN)\ndef yellow(x): return colorize(x, YELLOW)\ndef blue(x): return colorize(x, BLUE)\ndef purple(x): return colorize(x, PURPLE)\ndef cyan(x): return colorize(x, CYAN)\ndef light_gray(x): return colorize(x, LIGHT_GRAY)\ndef foreground(x): return colorize(x, FOREGROUND)\ndef gray(x): return colorize(x, GRAY)\ndef light_red(x): return colorize(x, LIGHT_RED)\ndef light_green(x): return colorize(x, LIGHT_GREEN)\ndef light_yellow(x): return colorize(x, LIGHT_YELLOW)\ndef light_blue(x): return colorize(x, LIGHT_BLUE)\ndef light_purple(x): return colorize(x, LIGHT_PURPLE)\ndef light_cyan(x): return colorize(x, LIGHT_CYAN)\ndef white(x): return colorize(x, WHITE)\ndef bold(x): return colorize(x, BOLD)\ndef underline(x): return colorize(x, UNDERLINE)\ndef colorize(x, color): return color + terminateWith(str(x), color) + NORMAL\n\[email protected]_on_stop\ndef generateColorFunctionInner(old, new):\n def wrapper(text):\n return new(old(text))\n return wrapper\n\ndef generateColorFunction(config):\n function = lambda x: x\n for color in str(config).split(','):\n function = generateColorFunctionInner(function, globals()[color.lower().replace('-', '_')])\n return function\n\ndef strip(x):\n return re.sub('\\x1b\\\\[\\d+m', '', x)\n\ndef terminateWith(x, color):\n return re.sub('\\x1b\\\\[0m', NORMAL + color, x)\n\ndef ljust_colored(x, length, char=' '):\n remaining = length - len(strip(x))\n return x + ((remaining // len(char) + 1) * char)[:remaining]\n\ndef rjust_colored(x, length, char=' '):\n remaining = length - len(strip(x))\n return ((remaining // len(char) + 1) * char)[:remaining] + x\n", "path": "pwndbg/color/__init__.py"}]}
1,955
321
gh_patches_debug_17123
rasdani/github-patches
git_diff
localstack__localstack-9732
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bug: Long Running Lambda Fails StepFunction State Machine Execution ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Behavior As of `v3.0.0` and `v3.0.1`, StepFunction StateMachines that have long-running Lambda tasks fail execution. It also looks like the StateMachine then retries by re-invoking the lambda 3 times in the background with a 1 minute gap in between invocations. Unfortunately, the state machine will have already failed execution by this point and these lambda runs fail when they try to update the state. The lambda is started successfully, but then fails with a timeout after 3 seconds: ``` 2023-11-24T22:09:56.758 ERROR --- [ad-35 (eval)] l.s.s.a.c.eval_component : Exception=FailureEventException, Error=Exception, Details={"taskFailedEventDetails": {"error": "Exception", "cause": "{\"errorMessage\":\"2023-11-24T22:09:56Z dbd4767f-32b8-46b7-9ef4-382ee583ad0a Task timed out after 3.00 seconds\"}", "resource": "invoke", "resourceType": "lambda"}} at '(StateTaskServiceLambda| {'comment': None, 'input_path': (InputPath| {'input_path_src': '$'}, 'output_path': (OutputPath| {'output_path': '$'}, 'state_entered_event_type': 'TaskStateEntered', 'state_exited_event_type': 'TaskStateExited', 'result_path': None, 'result_selector': None, 'retry': (RetryDecl| {'retriers': [(RetrierDecl| {'error_equals': (ErrorEqualsDecl| {'error_names': [(CustomErrorName| {'error_name': 'Lambda.ClientExecutionTimeoutException'}, (CustomErrorName| {'error_name': 'Lambda.ServiceException'}, (CustomErrorName| {'error_name': 'Lambda.AWSLambdaException'}, (CustomErrorName| {'error_name': 'Lambda.SdkClientException'}]}, 'interval_seconds': (IntervalSecondsDecl| {'seconds': 2}, 'max_attempts': (MaxAttemptsDecl| {'attempts': 6}, 'backoff_rate': (BackoffRateDecl| {'rate': 2.0}, '_attempts_counter': 0, '_next_interval_seconds': 2}]}, 'catch': None, 'timeout': (TimeoutSeconds| {'timeout_seconds': 99999999, 'is_default': None}, 'heartbeat': None, 'parameters': (Parameters| {'payload_tmpl': (PayloadTmpl| {'payload_bindings': [(PayloadBindingValue| {'field': 'FunctionName', 'value': (PayloadValueStr| {'val': 'arn:aws:lambda:us-east-1:000000000000:function:TestAppStack-lambdaslongrunning51EEA4-b04d9aee'}}, (PayloadBindingPath| {'field': 'Payload', 'path': '$'}]}}, 'name': 'long-running-task', 'state_type': <StateType.Task: 15>, 'continue_with': <localstack.services.stepfunctions.asl.component.state.state_continue_with.ContinueWithEnd object at 0xfffee6793b90>, 'resource': (ServiceResource| {'_region': '', '_account': '', 'resource_arn': 'arn:aws:states:::lambda:invoke', 'partition': 'aws', 'service_name': 'lambda', 'api_name': 'lambda', 'api_action': 'invoke', 'condition': None}}' ``` Even if I specify long timeouts on both the Lambda and the LambdaTask the state machine still fails the task after 3 seconds. This was working in version 2, and if I use the old StepFunctions provider, the StateMachine completes successfully. ### Expected Behavior The State Machine should finish successfully because the long running lambda finishes before the timeout. ### How are you starting LocalStack? With a docker-compose file ### Steps To Reproduce #### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`) I've created a repository that demonstrates the bug: https://github.com/noseworthy/localstack-sfn-bugs. 
I'm using localstack pro, so your terminal must have `LOCALSTACK_AUTH_TOKEN` specified. This should work with non-pro localstack however. You just need to modify the `compose.yaml` file. 1. Start localstack using docker-compose: `docker compose up --force-recreate --build -d` #### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands) 1. Install dependencies: `yarn install` 2. Bootstrap the CDK project: `yarn cdklocal bootstrap` 3. Deploy the CDK project: `yarn cdklocal deploy` 4. Trigger the state machine: `yarn trigger` Watch as the statemachine tries to execute, but fails saying that the long running lambda timed out after 3.00 seconds. ### Environment ```markdown - OS: macOS Sonoma 14.1.1 (23B81) - LocalStack: v3.0.1 Pro Docker Image ``` ### Anything else? Demo Repository: https://github.com/noseworthy/localstack-sfn-bugs </issue> <code> [start of localstack/services/stepfunctions/asl/utils/boto_client.py] 1 from botocore.client import BaseClient 2 from botocore.config import Config 3 4 from localstack.aws.connect import connect_to 5 6 7 def boto_client_for(region: str, account: str, service: str) -> BaseClient: 8 return connect_to.get_client( 9 aws_access_key_id=account, 10 region_name=region, 11 service_name=service, 12 config=Config(parameter_validation=False), 13 ) 14 [end of localstack/services/stepfunctions/asl/utils/boto_client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/localstack/services/stepfunctions/asl/utils/boto_client.py b/localstack/services/stepfunctions/asl/utils/boto_client.py --- a/localstack/services/stepfunctions/asl/utils/boto_client.py +++ b/localstack/services/stepfunctions/asl/utils/boto_client.py @@ -2,6 +2,7 @@ from botocore.config import Config from localstack.aws.connect import connect_to +from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import TimeoutSeconds def boto_client_for(region: str, account: str, service: str) -> BaseClient: @@ -9,5 +10,10 @@ aws_access_key_id=account, region_name=region, service_name=service, - config=Config(parameter_validation=False), + config=Config( + parameter_validation=False, + retries={"max_attempts": 0, "total_max_attempts": 1}, + connect_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS, + read_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS, + ), )
{"golden_diff": "diff --git a/localstack/services/stepfunctions/asl/utils/boto_client.py b/localstack/services/stepfunctions/asl/utils/boto_client.py\n--- a/localstack/services/stepfunctions/asl/utils/boto_client.py\n+++ b/localstack/services/stepfunctions/asl/utils/boto_client.py\n@@ -2,6 +2,7 @@\n from botocore.config import Config\n \n from localstack.aws.connect import connect_to\n+from localstack.services.stepfunctions.asl.component.common.timeouts.timeout import TimeoutSeconds\n \n \n def boto_client_for(region: str, account: str, service: str) -> BaseClient:\n@@ -9,5 +10,10 @@\n aws_access_key_id=account,\n region_name=region,\n service_name=service,\n- config=Config(parameter_validation=False),\n+ config=Config(\n+ parameter_validation=False,\n+ retries={\"max_attempts\": 0, \"total_max_attempts\": 1},\n+ connect_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS,\n+ read_timeout=TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS,\n+ ),\n )\n", "issue": "bug: Long Running Lambda Fails StepFunction State Machine Execution\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Current Behavior\r\n\r\nAs of `v3.0.0` and `v3.0.1`, StepFunction StateMachines that have long-running Lambda tasks fail execution. It also looks like the StateMachine then retries by re-invoking the lambda 3 times in the background with a 1 minute gap in between invocations. Unfortunately, the state machine will have already failed execution by this point and these lambda runs fail when they try to update the state.\r\n\r\nThe lambda is started successfully, but then fails with a timeout after 3 seconds:\r\n\r\n```\r\n2023-11-24T22:09:56.758 ERROR --- [ad-35 (eval)] l.s.s.a.c.eval_component : Exception=FailureEventException, Error=Exception, Details={\"taskFailedEventDetails\": {\"error\": \"Exception\", \"cause\": \"{\\\"errorMessage\\\":\\\"2023-11-24T22:09:56Z dbd4767f-32b8-46b7-9ef4-382ee583ad0a Task timed out after 3.00 seconds\\\"}\", \"resource\": \"invoke\", \"resourceType\": \"lambda\"}} at '(StateTaskServiceLambda| {'comment': None, 'input_path': (InputPath| {'input_path_src': '$'}, 'output_path': (OutputPath| {'output_path': '$'}, 'state_entered_event_type': 'TaskStateEntered', 'state_exited_event_type': 'TaskStateExited', 'result_path': None, 'result_selector': None, 'retry': (RetryDecl| {'retriers': [(RetrierDecl| {'error_equals': (ErrorEqualsDecl| {'error_names': [(CustomErrorName| {'error_name': 'Lambda.ClientExecutionTimeoutException'}, (CustomErrorName| {'error_name': 'Lambda.ServiceException'}, (CustomErrorName| {'error_name': 'Lambda.AWSLambdaException'}, (CustomErrorName| {'error_name': 'Lambda.SdkClientException'}]}, 'interval_seconds': (IntervalSecondsDecl| {'seconds': 2}, 'max_attempts': (MaxAttemptsDecl| {'attempts': 6}, 'backoff_rate': (BackoffRateDecl| {'rate': 2.0}, '_attempts_counter': 0, '_next_interval_seconds': 2}]}, 'catch': None, 'timeout': (TimeoutSeconds| {'timeout_seconds': 99999999, 'is_default': None}, 'heartbeat': None, 'parameters': (Parameters| {'payload_tmpl': (PayloadTmpl| {'payload_bindings': [(PayloadBindingValue| {'field': 'FunctionName', 'value': (PayloadValueStr| {'val': 'arn:aws:lambda:us-east-1:000000000000:function:TestAppStack-lambdaslongrunning51EEA4-b04d9aee'}}, (PayloadBindingPath| {'field': 'Payload', 'path': '$'}]}}, 'name': 'long-running-task', 'state_type': <StateType.Task: 15>, 'continue_with': <localstack.services.stepfunctions.asl.component.state.state_continue_with.ContinueWithEnd object at 0xfffee6793b90>, 
'resource': (ServiceResource| {'_region': '', '_account': '', 'resource_arn': 'arn:aws:states:::lambda:invoke', 'partition': 'aws', 'service_name': 'lambda', 'api_name': 'lambda', 'api_action': 'invoke', 'condition': None}}'\r\n```\r\n\r\nEven if I specify long timeouts on both the Lambda and the LambdaTask the state machine still fails the task after 3 seconds. This was working in version 2, and if I use the old StepFunctions provider, the StateMachine completes successfully.\r\n\r\n### Expected Behavior\r\n\r\nThe State Machine should finish successfully because the long running lambda finishes before the timeout.\r\n\r\n### How are you starting LocalStack?\r\n\r\nWith a docker-compose file\r\n\r\n### Steps To Reproduce\r\n\r\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\nI've created a repository that demonstrates the bug: https://github.com/noseworthy/localstack-sfn-bugs. I'm using localstack pro, so your terminal must have `LOCALSTACK_AUTH_TOKEN` specified. This should work with non-pro localstack however. You just need to modify the `compose.yaml` file.\r\n\r\n1. Start localstack using docker-compose: `docker compose up --force-recreate --build -d`\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n1. Install dependencies: `yarn install`\r\n2. Bootstrap the CDK project: `yarn cdklocal bootstrap`\r\n3. Deploy the CDK project: `yarn cdklocal deploy`\r\n4. Trigger the state machine: `yarn trigger`\r\n\r\nWatch as the statemachine tries to execute, but fails saying that the long running lambda timed out after 3.00 seconds.\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: macOS Sonoma 14.1.1 (23B81)\r\n- LocalStack: v3.0.1 Pro Docker Image\r\n```\r\n\r\n\r\n### Anything else?\r\n\r\nDemo Repository: https://github.com/noseworthy/localstack-sfn-bugs\n", "before_files": [{"content": "from botocore.client import BaseClient\nfrom botocore.config import Config\n\nfrom localstack.aws.connect import connect_to\n\n\ndef boto_client_for(region: str, account: str, service: str) -> BaseClient:\n return connect_to.get_client(\n aws_access_key_id=account,\n region_name=region,\n service_name=service,\n config=Config(parameter_validation=False),\n )\n", "path": "localstack/services/stepfunctions/asl/utils/boto_client.py"}]}
1,832
229
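The fix above tightens the botocore client used for Step Functions task invocations: SDK-level retries are switched off (a plausible source of the repeated background Lambda invocations the issue describes) and the connect/read timeouts are raised so the service's own `TimeoutSeconds` governs long-running tasks rather than the default 60-second socket limits. A minimal sketch of that client configuration — the numeric timeout is an assumed stand-in for `TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS`, not a value taken from the project:

```python
from botocore.config import Config

# Assumed stand-in for TimeoutSeconds.DEFAULT_TIMEOUT_SECONDS in the real module.
DEFAULT_TIMEOUT_SECONDS = 99999999

config = Config(
    parameter_validation=False,
    retries={"max_attempts": 0, "total_max_attempts": 1},  # let the state machine own retry policy
    connect_timeout=DEFAULT_TIMEOUT_SECONDS,                # don't cut off long-running lambda:invoke calls
    read_timeout=DEFAULT_TIMEOUT_SECONDS,
)
```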
gh_patches_debug_22038
rasdani/github-patches
git_diff
ietf-tools__datatracker-6836
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> /doc/help/state/draft-stream-editorial/ fails with a 404 The page is looking for a state it cannot find. </issue> <code> [start of ietf/doc/views_help.py] 1 # Copyright The IETF Trust 2013-2023, All Rights Reserved 2 3 from django.shortcuts import render, get_object_or_404 4 from django.http import Http404 5 6 from ietf.doc.models import State, StateType, IESG_SUBSTATE_TAGS 7 from ietf.name.models import DocRelationshipName, DocTagName 8 from ietf.doc.utils import get_tags_for_stream_id 9 10 def state_help(request, type=None): 11 slug, title = { 12 "draft-iesg": ("draft-iesg", "IESG States for Internet-Drafts"), 13 "draft-rfceditor": ("draft-rfceditor", "RFC Editor States for Internet-Drafts"), 14 "draft-iana-action": ("draft-iana-action", "IANA Action States for Internet-Drafts"), 15 "draft-iana-review": ("draft-iana-review", "IANA Review States for Internet-Drafts"), 16 "draft-iana-experts": ("draft-iana-experts", "IANA Expert Review States for Internet-Drafts"), 17 "draft-stream-ietf": ("draft-stream-ietf", "IETF Stream States for Internet-Drafts"), 18 "draft-stream-irtf": ("draft-stream-irtf", "IRTF Stream States for Internet-Drafts"), 19 "draft-stream-ise": ("draft-stream-ise", "ISE Stream States for Internet-Drafts"), 20 "draft-stream-iab": ("draft-stream-iab", "IAB Stream States for Internet-Drafts"), 21 "charter": ("charter", "Charter States"), 22 "conflict-review": ("conflrev", "Conflict Review States"), 23 "status-change": ("statchg", "RFC Status Change States"), 24 "bofreq": ("bofreq", "BOF Request States"), 25 "procmaterials": ("procmaterials", "Proceedings Materials States"), 26 "statement": {"statement", "Statement States"} 27 }.get(type, (None, None)) 28 state_type = get_object_or_404(StateType, slug=slug) 29 30 states = State.objects.filter(used=True, type=state_type).order_by("order") 31 32 has_next_states = False 33 for state in states: 34 if state.next_states.all(): 35 has_next_states = True 36 break 37 38 tags = [] 39 40 if state_type.slug == "draft-iesg": 41 tags = DocTagName.objects.filter(slug__in=IESG_SUBSTATE_TAGS) 42 elif state_type.slug.startswith("draft-stream-"): 43 possible = get_tags_for_stream_id(state_type.slug.replace("draft-stream-", "")) 44 tags = DocTagName.objects.filter(slug__in=possible) 45 46 return render(request, "doc/state_help.html", 47 { 48 "title": title, 49 "state_type": state_type, 50 "states": states, 51 "has_next_states": has_next_states, 52 "tags": tags, 53 } ) 54 55 def relationship_help(request,subset=None): 56 subsets = { "reference": ['refnorm','refinfo','refunk','refold'], 57 "status" : ['tops','tois','tohist','toinf','tobcp','toexp'], 58 } 59 if subset and subset not in subsets: 60 raise Http404() 61 rels = DocRelationshipName.objects.filter(used=True) 62 if subset: 63 rels = rels.filter(slug__in=subsets[subset]) 64 return render(request, "doc/relationship_help.html", { "relations": rels } ) 65 [end of ietf/doc/views_help.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ietf/doc/views_help.py b/ietf/doc/views_help.py --- a/ietf/doc/views_help.py +++ b/ietf/doc/views_help.py @@ -1,5 +1,7 @@ # Copyright The IETF Trust 2013-2023, All Rights Reserved +import debug # pyflakes: ignore + from django.shortcuts import render, get_object_or_404 from django.http import Http404 @@ -18,6 +20,7 @@ "draft-stream-irtf": ("draft-stream-irtf", "IRTF Stream States for Internet-Drafts"), "draft-stream-ise": ("draft-stream-ise", "ISE Stream States for Internet-Drafts"), "draft-stream-iab": ("draft-stream-iab", "IAB Stream States for Internet-Drafts"), + "draft-stream-editorial": ("draft-stream-editorial", "Editorial Stream States for Internet-Drafts"), "charter": ("charter", "Charter States"), "conflict-review": ("conflrev", "Conflict Review States"), "status-change": ("statchg", "RFC Status Change States"),
{"golden_diff": "diff --git a/ietf/doc/views_help.py b/ietf/doc/views_help.py\n--- a/ietf/doc/views_help.py\n+++ b/ietf/doc/views_help.py\n@@ -1,5 +1,7 @@\n # Copyright The IETF Trust 2013-2023, All Rights Reserved\n \n+import debug # pyflakes: ignore\n+\n from django.shortcuts import render, get_object_or_404\n from django.http import Http404\n \n@@ -18,6 +20,7 @@\n \"draft-stream-irtf\": (\"draft-stream-irtf\", \"IRTF Stream States for Internet-Drafts\"),\n \"draft-stream-ise\": (\"draft-stream-ise\", \"ISE Stream States for Internet-Drafts\"),\n \"draft-stream-iab\": (\"draft-stream-iab\", \"IAB Stream States for Internet-Drafts\"),\n+ \"draft-stream-editorial\": (\"draft-stream-editorial\", \"Editorial Stream States for Internet-Drafts\"),\n \"charter\": (\"charter\", \"Charter States\"),\n \"conflict-review\": (\"conflrev\", \"Conflict Review States\"),\n \"status-change\": (\"statchg\", \"RFC Status Change States\"),\n", "issue": "/doc/help/state/draft-stream-editorial/ fails with a 404\nThe page is looking for a state it cannot find.\n", "before_files": [{"content": "# Copyright The IETF Trust 2013-2023, All Rights Reserved\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import Http404\n\nfrom ietf.doc.models import State, StateType, IESG_SUBSTATE_TAGS\nfrom ietf.name.models import DocRelationshipName, DocTagName\nfrom ietf.doc.utils import get_tags_for_stream_id\n\ndef state_help(request, type=None):\n slug, title = {\n \"draft-iesg\": (\"draft-iesg\", \"IESG States for Internet-Drafts\"),\n \"draft-rfceditor\": (\"draft-rfceditor\", \"RFC Editor States for Internet-Drafts\"),\n \"draft-iana-action\": (\"draft-iana-action\", \"IANA Action States for Internet-Drafts\"),\n \"draft-iana-review\": (\"draft-iana-review\", \"IANA Review States for Internet-Drafts\"),\n \"draft-iana-experts\": (\"draft-iana-experts\", \"IANA Expert Review States for Internet-Drafts\"),\n \"draft-stream-ietf\": (\"draft-stream-ietf\", \"IETF Stream States for Internet-Drafts\"),\n \"draft-stream-irtf\": (\"draft-stream-irtf\", \"IRTF Stream States for Internet-Drafts\"),\n \"draft-stream-ise\": (\"draft-stream-ise\", \"ISE Stream States for Internet-Drafts\"),\n \"draft-stream-iab\": (\"draft-stream-iab\", \"IAB Stream States for Internet-Drafts\"),\n \"charter\": (\"charter\", \"Charter States\"),\n \"conflict-review\": (\"conflrev\", \"Conflict Review States\"),\n \"status-change\": (\"statchg\", \"RFC Status Change States\"),\n \"bofreq\": (\"bofreq\", \"BOF Request States\"),\n \"procmaterials\": (\"procmaterials\", \"Proceedings Materials States\"),\n \"statement\": {\"statement\", \"Statement States\"}\n }.get(type, (None, None))\n state_type = get_object_or_404(StateType, slug=slug)\n\n states = State.objects.filter(used=True, type=state_type).order_by(\"order\")\n\n has_next_states = False\n for state in states:\n if state.next_states.all():\n has_next_states = True\n break\n\n tags = []\n\n if state_type.slug == \"draft-iesg\":\n tags = DocTagName.objects.filter(slug__in=IESG_SUBSTATE_TAGS)\n elif state_type.slug.startswith(\"draft-stream-\"):\n possible = get_tags_for_stream_id(state_type.slug.replace(\"draft-stream-\", \"\"))\n tags = DocTagName.objects.filter(slug__in=possible)\n\n return render(request, \"doc/state_help.html\",\n {\n \"title\": title,\n \"state_type\": state_type,\n \"states\": states,\n \"has_next_states\": has_next_states,\n \"tags\": tags,\n } )\n\ndef relationship_help(request,subset=None):\n subsets = { \"reference\": 
['refnorm','refinfo','refunk','refold'],\n \"status\" : ['tops','tois','tohist','toinf','tobcp','toexp'],\n }\n if subset and subset not in subsets:\n raise Http404()\n rels = DocRelationshipName.objects.filter(used=True)\n if subset:\n rels = rels.filter(slug__in=subsets[subset]) \n return render(request, \"doc/relationship_help.html\", { \"relations\": rels } )\n", "path": "ietf/doc/views_help.py"}]}
1,442
258
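The change simply registers the missing `draft-stream-editorial` entry in the slug/title lookup of `state_help`. Without it, the dictionary `.get` falls through to `(None, None)` and `get_object_or_404(StateType, slug=None)` raises the 404 reported in the issue. That fall-through can be sketched outside Django as:

```python
# Illustrative only: the lookup shape used by state_help, minus the Django pieces.
state_titles = {
    "draft-stream-iab": ("draft-stream-iab", "IAB Stream States for Internet-Drafts"),
    # "draft-stream-editorial" was absent here before the fix
}

slug, title = state_titles.get("draft-stream-editorial", (None, None))
assert slug is None  # get_object_or_404(StateType, slug=None) then raises Http404
```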
gh_patches_debug_35166
rasdani/github-patches
git_diff
gwastro__pycbc-3561
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need to decode approximant strings in pycbc_condition_strain Currently pycbc_condition_strain does not recognize approximants after reading them from the variable parameters table of an injection hdf. For example, when approximant 'SEOBNRv4' is stored as a variable parameter in an injection hdf, reading the hdf gives back a string b'SEOBNRv4' which is not recognized as being the same thing as 'SEOBNRv4' Running the pycbc live test contained in my pull request https://github.com/gwastro/pycbc/pull/3322 causes an error when pycbc_condition_strain gets to Applying Injections 2020-07-24 12:14:04,373 Applying injections Traceback (most recent call last): File "/home/max.trevor/dev_env/bin/pycbc_condition_strain", line 4, in <module> __import__('pkg_resources').run_script('PyCBC===e9f3da', 'pycbc_condition_strain') File "/home/max.trevor/dev_env/lib/python3.6/site-packages/pkg_resources/__init__.py", line 667, in run_script self.require(requires)[0].run_script(script_name, ns) File "/home/max.trevor/dev_env/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1464, in run_script exec(code, namespace, namespace) File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/EGG-INFO/scripts/pycbc_condition_strain", line 87, in <module> precision=args.output_precision) File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/strain/strain.py", line 392, in from_cli inj_filter_rejector=inj_filter_rejector) File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/inject/inject.py", line 505, in apply detector_name, f_lower=f_l, distance_scale=distance_scale) File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/inject/inject.py", line 557, in make_strain_from_inj_object **self.extra_args) File "/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/waveform/waveform.py", line 462, in get_td_waveform (input_params['approximant'])) ValueError: Approximant b'SEOBNRv4' not available </issue> <code> [start of examples/live/generate_injections.py] 1 #!/usr/bin/env python 2 3 import os 4 import sys 5 from pycbc.io import FieldArray 6 from pycbc.inject import InjectionSet 7 8 9 if os.path.exists('./test_inj1.hdf'): 10 raise OSError("output-file 1 already exists") 11 12 if os.path.exists('./test_inj2.hdf'): 13 raise OSError("output-file 2 already exists") 14 15 dtype = [('mass1', float), ('mass2', float), 16 ('spin1z', float), ('spin2z', float), 17 ('tc', float), ('distance', float)] 18 19 # injection 1 20 static_params = {'f_lower': 18.0, 'f_ref': 18.0, 'approximant': 'SEOBNRv4', 21 'taper': 'start', 'ra': 45.0, 'dec': 45.0, 22 'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0} 23 24 samples = FieldArray(1, dtype=dtype) 25 26 # The following 'magic numbers' are intended to match the highest 27 # mass injection in the template bank 28 samples['mass1'] = [290.929321] 29 samples['mass2'] = [3.6755455] 30 samples['spin1z'] = [0.9934847] 31 samples['spin2z'] = [0.92713535] 32 samples['tc'] = [1272790100.1] 33 samples['distance'] = [301.5] 34 35 InjectionSet.write('test_inj1.hdf', samples, static_args=static_params, 36 injtype='cbc', cmd=" ".join(sys.argv)) 37 38 # injection 2 39 static_params['approximant'] = 'SpinTaylorT4' 40 41 samples = FieldArray(1, dtype=dtype) 42 43 # The 
following 'magic numbers' are intended to match the lowest 44 # mass injection in the template bank 45 samples['mass1'] = [1.1331687] 46 samples['mass2'] = [1.010624] 47 samples['spin1z'] = [0.029544285] 48 samples['spin2z'] = [0.020993788] 49 samples['tc'] = [1272790260.1] 50 samples['distance'] = [36.0] 51 52 InjectionSet.write('test_inj2.hdf', samples, static_args=static_params, 53 injtype='cbc', cmd=" ".join(sys.argv)) 54 [end of examples/live/generate_injections.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/live/generate_injections.py b/examples/live/generate_injections.py --- a/examples/live/generate_injections.py +++ b/examples/live/generate_injections.py @@ -1,53 +1,30 @@ #!/usr/bin/env python -import os import sys from pycbc.io import FieldArray from pycbc.inject import InjectionSet -if os.path.exists('./test_inj1.hdf'): - raise OSError("output-file 1 already exists") - -if os.path.exists('./test_inj2.hdf'): - raise OSError("output-file 2 already exists") - dtype = [('mass1', float), ('mass2', float), ('spin1z', float), ('spin2z', float), - ('tc', float), ('distance', float)] + ('tc', float), ('distance', float), + ('approximant', 'S32')] -# injection 1 -static_params = {'f_lower': 18.0, 'f_ref': 18.0, 'approximant': 'SEOBNRv4', +static_params = {'f_lower': 18.0, 'f_ref': 18.0, 'taper': 'start', 'ra': 45.0, 'dec': 45.0, 'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0} -samples = FieldArray(1, dtype=dtype) +samples = FieldArray(2, dtype=dtype) # The following 'magic numbers' are intended to match the highest -# mass injection in the template bank -samples['mass1'] = [290.929321] -samples['mass2'] = [3.6755455] -samples['spin1z'] = [0.9934847] -samples['spin2z'] = [0.92713535] -samples['tc'] = [1272790100.1] -samples['distance'] = [301.5] - -InjectionSet.write('test_inj1.hdf', samples, static_args=static_params, - injtype='cbc', cmd=" ".join(sys.argv)) - -# injection 2 -static_params['approximant'] = 'SpinTaylorT4' - -samples = FieldArray(1, dtype=dtype) - -# The following 'magic numbers' are intended to match the lowest -# mass injection in the template bank -samples['mass1'] = [1.1331687] -samples['mass2'] = [1.010624] -samples['spin1z'] = [0.029544285] -samples['spin2z'] = [0.020993788] -samples['tc'] = [1272790260.1] -samples['distance'] = [36.0] - -InjectionSet.write('test_inj2.hdf', samples, static_args=static_params, +# and lowest mass templates in the template bank +samples['mass1'] = [290.929321, 1.1331687] +samples['mass2'] = [3.6755455, 1.010624] +samples['spin1z'] = [0.9934847, 0.029544285] +samples['spin2z'] = [0.92713535, 0.020993788] +samples['tc'] = [1272790100.1, 1272790260.1] +samples['distance'] = [301.5, 36.0] +samples['approximant'] = ['SEOBNRv4', 'SpinTaylorT4'] + +InjectionSet.write('injections.hdf', samples, static_args=static_params, injtype='cbc', cmd=" ".join(sys.argv))
{"golden_diff": "diff --git a/examples/live/generate_injections.py b/examples/live/generate_injections.py\n--- a/examples/live/generate_injections.py\n+++ b/examples/live/generate_injections.py\n@@ -1,53 +1,30 @@\n #!/usr/bin/env python\n \n-import os\n import sys\n from pycbc.io import FieldArray\n from pycbc.inject import InjectionSet\n \n \n-if os.path.exists('./test_inj1.hdf'):\n- raise OSError(\"output-file 1 already exists\")\n-\n-if os.path.exists('./test_inj2.hdf'):\n- raise OSError(\"output-file 2 already exists\")\n-\n dtype = [('mass1', float), ('mass2', float),\n ('spin1z', float), ('spin2z', float),\n- ('tc', float), ('distance', float)]\n+ ('tc', float), ('distance', float),\n+ ('approximant', 'S32')]\n \n-# injection 1\n-static_params = {'f_lower': 18.0, 'f_ref': 18.0, 'approximant': 'SEOBNRv4',\n+static_params = {'f_lower': 18.0, 'f_ref': 18.0,\n 'taper': 'start', 'ra': 45.0, 'dec': 45.0,\n 'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0}\n \n-samples = FieldArray(1, dtype=dtype)\n+samples = FieldArray(2, dtype=dtype)\n \n # The following 'magic numbers' are intended to match the highest\n-# mass injection in the template bank\n-samples['mass1'] = [290.929321]\n-samples['mass2'] = [3.6755455]\n-samples['spin1z'] = [0.9934847]\n-samples['spin2z'] = [0.92713535]\n-samples['tc'] = [1272790100.1]\n-samples['distance'] = [301.5]\n-\n-InjectionSet.write('test_inj1.hdf', samples, static_args=static_params,\n- injtype='cbc', cmd=\" \".join(sys.argv))\n-\n-# injection 2\n-static_params['approximant'] = 'SpinTaylorT4'\n-\n-samples = FieldArray(1, dtype=dtype)\n-\n-# The following 'magic numbers' are intended to match the lowest\n-# mass injection in the template bank\n-samples['mass1'] = [1.1331687]\n-samples['mass2'] = [1.010624]\n-samples['spin1z'] = [0.029544285]\n-samples['spin2z'] = [0.020993788]\n-samples['tc'] = [1272790260.1]\n-samples['distance'] = [36.0]\n-\n-InjectionSet.write('test_inj2.hdf', samples, static_args=static_params,\n+# and lowest mass templates in the template bank\n+samples['mass1'] = [290.929321, 1.1331687]\n+samples['mass2'] = [3.6755455, 1.010624]\n+samples['spin1z'] = [0.9934847, 0.029544285]\n+samples['spin2z'] = [0.92713535, 0.020993788]\n+samples['tc'] = [1272790100.1, 1272790260.1]\n+samples['distance'] = [301.5, 36.0]\n+samples['approximant'] = ['SEOBNRv4', 'SpinTaylorT4']\n+\n+InjectionSet.write('injections.hdf', samples, static_args=static_params,\n injtype='cbc', cmd=\" \".join(sys.argv))\n", "issue": "Need to decode approximant strings in pycbc_condition_strain \nCurrently pycbc_condition_strain does not recognize approximants after reading them from the variable parameters table of an injection hdf. 
For example, when approximant 'SEOBNRv4' is stored as a variable parameter in an injection hdf, reading the hdf gives back a string b'SEOBNRv4' which is not recognized as being the same thing as 'SEOBNRv4'\r\n\r\nRunning the pycbc live test contained in my pull request https://github.com/gwastro/pycbc/pull/3322 causes an error when pycbc_condition_strain gets to Applying Injections\r\n\r\n2020-07-24 12:14:04,373 Applying injections\r\nTraceback (most recent call last):\r\n File \"/home/max.trevor/dev_env/bin/pycbc_condition_strain\", line 4, in <module>\r\n __import__('pkg_resources').run_script('PyCBC===e9f3da', 'pycbc_condition_strain')\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 667, in run_script\r\n self.require(requires)[0].run_script(script_name, ns)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/pkg_resources/__init__.py\", line 1464, in run_script\r\n exec(code, namespace, namespace)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/EGG-INFO/scripts/pycbc_condition_strain\", line 87, in <module>\r\n precision=args.output_precision)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/strain/strain.py\", line 392, in from_cli\r\n inj_filter_rejector=inj_filter_rejector)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/inject/inject.py\", line 505, in apply\r\n detector_name, f_lower=f_l, distance_scale=distance_scale)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/inject/inject.py\", line 557, in make_strain_from_inj_object\r\n **self.extra_args)\r\n File \"/home/max.trevor/dev_env/lib/python3.6/site-packages/PyCBC-e9f3da-py3.6-linux-x86_64.egg/pycbc/waveform/waveform.py\", line 462, in get_td_waveform\r\n (input_params['approximant']))\r\nValueError: Approximant b'SEOBNRv4' not available\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nfrom pycbc.io import FieldArray\nfrom pycbc.inject import InjectionSet\n\n\nif os.path.exists('./test_inj1.hdf'):\n raise OSError(\"output-file 1 already exists\")\n\nif os.path.exists('./test_inj2.hdf'):\n raise OSError(\"output-file 2 already exists\")\n\ndtype = [('mass1', float), ('mass2', float),\n ('spin1z', float), ('spin2z', float),\n ('tc', float), ('distance', float)]\n\n# injection 1\nstatic_params = {'f_lower': 18.0, 'f_ref': 18.0, 'approximant': 'SEOBNRv4',\n 'taper': 'start', 'ra': 45.0, 'dec': 45.0,\n 'inclination': 0.0, 'coa_phase': 0.0, 'polarization': 0.0}\n\nsamples = FieldArray(1, dtype=dtype)\n\n# The following 'magic numbers' are intended to match the highest\n# mass injection in the template bank\nsamples['mass1'] = [290.929321]\nsamples['mass2'] = [3.6755455]\nsamples['spin1z'] = [0.9934847]\nsamples['spin2z'] = [0.92713535]\nsamples['tc'] = [1272790100.1]\nsamples['distance'] = [301.5]\n\nInjectionSet.write('test_inj1.hdf', samples, static_args=static_params,\n injtype='cbc', cmd=\" \".join(sys.argv))\n\n# injection 2\nstatic_params['approximant'] = 'SpinTaylorT4'\n\nsamples = FieldArray(1, dtype=dtype)\n\n# The following 'magic numbers' are intended to match the lowest\n# mass injection in the template bank\nsamples['mass1'] = [1.1331687]\nsamples['mass2'] = [1.010624]\nsamples['spin1z'] = [0.029544285]\nsamples['spin2z'] = [0.020993788]\nsamples['tc'] = [1272790260.1]\nsamples['distance'] = 
[36.0]\n\nInjectionSet.write('test_inj2.hdf', samples, static_args=static_params,\n injtype='cbc', cmd=\" \".join(sys.argv))\n", "path": "examples/live/generate_injections.py"}]}
1,903
943
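The golden diff reworks the example injection generator so `approximant` is stored per sample (dtype `'S32'`), but the underlying complaint is the bytes-versus-str mismatch when HDF5 strings are read back under Python 3. The generic pattern for that mismatch — shown here as an illustration, not necessarily where the project applied it — is a guarded decode:

```python
# What an HDF5-backed string column typically hands back under Python 3.
raw = b"SEOBNRv4"

# Decode only when needed, so the same code also accepts plain str values.
approximant = raw.decode("utf-8") if isinstance(raw, bytes) else raw
assert approximant == "SEOBNRv4"
```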
gh_patches_debug_11308
rasdani/github-patches
git_diff
scrapy__scrapy-2418
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> inspect_response(response) yields incorrect response in IPython shell Example case (requires registration at example site, and even then would be hard to use as a use-case; modify to suit your needs): http://pastebin.com/GT8N893q In the above example, the response.meta printout in after_submit callback does not match that within the inspect_response shell on the second iteration (the first is correct). It appears that inspect_response has a stale response the second time. </issue> <code> [start of scrapy/utils/console.py] 1 from functools import wraps 2 from collections import OrderedDict 3 4 def _embed_ipython_shell(namespace={}, banner=''): 5 """Start an IPython Shell""" 6 try: 7 from IPython.terminal.embed import InteractiveShellEmbed 8 from IPython.terminal.ipapp import load_default_config 9 except ImportError: 10 from IPython.frontend.terminal.embed import InteractiveShellEmbed 11 from IPython.frontend.terminal.ipapp import load_default_config 12 13 @wraps(_embed_ipython_shell) 14 def wrapper(namespace=namespace, banner=''): 15 config = load_default_config() 16 # Always use .instace() to ensure _instance propagation to all parents 17 # this is needed for <TAB> completion works well for new imports 18 shell = InteractiveShellEmbed.instance( 19 banner1=banner, user_ns=namespace, config=config) 20 shell() 21 return wrapper 22 23 def _embed_bpython_shell(namespace={}, banner=''): 24 """Start a bpython shell""" 25 import bpython 26 @wraps(_embed_bpython_shell) 27 def wrapper(namespace=namespace, banner=''): 28 bpython.embed(locals_=namespace, banner=banner) 29 return wrapper 30 31 def _embed_standard_shell(namespace={}, banner=''): 32 """Start a standard python shell""" 33 import code 34 try: # readline module is only available on unix systems 35 import readline 36 except ImportError: 37 pass 38 else: 39 import rlcompleter 40 readline.parse_and_bind("tab:complete") 41 @wraps(_embed_standard_shell) 42 def wrapper(namespace=namespace, banner=''): 43 code.interact(banner=banner, local=namespace) 44 return wrapper 45 46 DEFAULT_PYTHON_SHELLS = OrderedDict([ 47 ('ipython', _embed_ipython_shell), 48 ('bpython', _embed_bpython_shell), 49 ( 'python', _embed_standard_shell), 50 ]) 51 52 def get_shell_embed_func(shells=None, known_shells=None): 53 """Return the first acceptable shell-embed function 54 from a given list of shell names. 55 """ 56 if shells is None: # list, preference order of shells 57 shells = DEFAULT_PYTHON_SHELLS.keys() 58 if known_shells is None: # available embeddable shells 59 known_shells = DEFAULT_PYTHON_SHELLS.copy() 60 for shell in shells: 61 if shell in known_shells: 62 try: 63 # function test: run all setup code (imports), 64 # but dont fall into the shell 65 return known_shells[shell]() 66 except ImportError: 67 continue 68 69 def start_python_console(namespace=None, banner='', shells=None): 70 """Start Python console bound to the given namespace. 71 Readline support and tab completion will be used on Unix, if available. 72 """ 73 if namespace is None: 74 namespace = {} 75 76 try: 77 shell = get_shell_embed_func(shells) 78 if shell is not None: 79 shell(namespace=namespace, banner=banner) 80 except SystemExit: # raised when using exit() in python code.interact 81 pass 82 [end of scrapy/utils/console.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/utils/console.py b/scrapy/utils/console.py --- a/scrapy/utils/console.py +++ b/scrapy/utils/console.py @@ -15,6 +15,9 @@ config = load_default_config() # Always use .instace() to ensure _instance propagation to all parents # this is needed for <TAB> completion works well for new imports + # and clear the instance to always have the fresh env + # on repeated breaks like with inspect_response() + InteractiveShellEmbed.clear_instance() shell = InteractiveShellEmbed.instance( banner1=banner, user_ns=namespace, config=config) shell()
{"golden_diff": "diff --git a/scrapy/utils/console.py b/scrapy/utils/console.py\n--- a/scrapy/utils/console.py\n+++ b/scrapy/utils/console.py\n@@ -15,6 +15,9 @@\n config = load_default_config()\n # Always use .instace() to ensure _instance propagation to all parents\n # this is needed for <TAB> completion works well for new imports\n+ # and clear the instance to always have the fresh env\n+ # on repeated breaks like with inspect_response()\n+ InteractiveShellEmbed.clear_instance()\n shell = InteractiveShellEmbed.instance(\n banner1=banner, user_ns=namespace, config=config)\n shell()\n", "issue": "inspect_response(response) yields incorrect response in IPython shell\nExample case (requires registration at example site, and even then would be hard to use as a use-case; modify to suit your needs): http://pastebin.com/GT8N893q\n\nIn the above example, the response.meta printout in after_submit callback does not match that within the inspect_response shell on the second iteration (the first is correct). It appears that inspect_response has a stale response the second time.\n\n", "before_files": [{"content": "from functools import wraps\nfrom collections import OrderedDict\n\ndef _embed_ipython_shell(namespace={}, banner=''):\n \"\"\"Start an IPython Shell\"\"\"\n try:\n from IPython.terminal.embed import InteractiveShellEmbed\n from IPython.terminal.ipapp import load_default_config\n except ImportError:\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n from IPython.frontend.terminal.ipapp import load_default_config\n\n @wraps(_embed_ipython_shell)\n def wrapper(namespace=namespace, banner=''):\n config = load_default_config()\n # Always use .instace() to ensure _instance propagation to all parents\n # this is needed for <TAB> completion works well for new imports\n shell = InteractiveShellEmbed.instance(\n banner1=banner, user_ns=namespace, config=config)\n shell()\n return wrapper\n\ndef _embed_bpython_shell(namespace={}, banner=''):\n \"\"\"Start a bpython shell\"\"\"\n import bpython\n @wraps(_embed_bpython_shell)\n def wrapper(namespace=namespace, banner=''):\n bpython.embed(locals_=namespace, banner=banner)\n return wrapper\n\ndef _embed_standard_shell(namespace={}, banner=''):\n \"\"\"Start a standard python shell\"\"\"\n import code\n try: # readline module is only available on unix systems\n import readline\n except ImportError:\n pass\n else:\n import rlcompleter\n readline.parse_and_bind(\"tab:complete\")\n @wraps(_embed_standard_shell)\n def wrapper(namespace=namespace, banner=''):\n code.interact(banner=banner, local=namespace)\n return wrapper\n\nDEFAULT_PYTHON_SHELLS = OrderedDict([\n ('ipython', _embed_ipython_shell),\n ('bpython', _embed_bpython_shell),\n ( 'python', _embed_standard_shell),\n])\n\ndef get_shell_embed_func(shells=None, known_shells=None):\n \"\"\"Return the first acceptable shell-embed function\n from a given list of shell names.\n \"\"\"\n if shells is None: # list, preference order of shells\n shells = DEFAULT_PYTHON_SHELLS.keys()\n if known_shells is None: # available embeddable shells\n known_shells = DEFAULT_PYTHON_SHELLS.copy()\n for shell in shells:\n if shell in known_shells:\n try:\n # function test: run all setup code (imports),\n # but dont fall into the shell\n return known_shells[shell]()\n except ImportError:\n continue\n\ndef start_python_console(namespace=None, banner='', shells=None):\n \"\"\"Start Python console bound to the given namespace.\n Readline support and tab completion will be used on Unix, if available.\n \"\"\"\n 
if namespace is None:\n namespace = {}\n\n try:\n shell = get_shell_embed_func(shells)\n if shell is not None:\n shell(namespace=namespace, banner=banner)\n except SystemExit: # raised when using exit() in python code.interact\n pass\n", "path": "scrapy/utils/console.py"}]}
1,419
143
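The fix clears IPython's cached singleton before embedding, so each `inspect_response()` break builds a shell around the current namespace instead of reusing the one captured at the previous break. A minimal sketch of that embed pattern, assuming IPython is installed:

```python
from IPython.terminal.embed import InteractiveShellEmbed

# Drop any shell instance cached by an earlier embed; otherwise .instance()
# returns the old shell with the old user namespace (the stale response).
InteractiveShellEmbed.clear_instance()

shell = InteractiveShellEmbed.instance(banner1="", user_ns={"response": object()})
shell()  # opens the embedded prompt with the fresh namespace
```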
gh_patches_debug_26537
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-1297
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support of python 3.8 - [x] Add to travis - [x] Add to appveyor - [x] Add to tox - [x] Add to setup.py - [x] Add to docs </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """cookiecutter distutils configuration""" 5 6 import os 7 import io 8 import sys 9 10 from setuptools import setup 11 12 version = "1.7.0" 13 14 if sys.argv[-1] == 'publish': 15 os.system('python setup.py sdist upload') 16 os.system('python setup.py bdist_wheel upload') 17 sys.exit() 18 19 if sys.argv[-1] == 'tag': 20 os.system("git tag -a %s -m 'version %s'" % (version, version)) 21 os.system("git push --tags") 22 sys.exit() 23 24 with io.open('README.md', 'r', encoding='utf-8') as readme_file: 25 readme = readme_file.read() 26 27 requirements = [ 28 'binaryornot>=0.2.0', 29 'jinja2>=2.7', 30 'click>=7.0', 31 'poyo>=0.1.0', 32 'jinja2-time>=0.1.0', 33 'requests>=2.18.0', 34 'six>=1.10', 35 ] 36 37 if sys.argv[-1] == 'readme': 38 print(readme) 39 sys.exit() 40 41 42 setup( 43 name='cookiecutter', 44 version=version, 45 description=('A command-line utility that creates projects from project ' 46 'templates, e.g. creating a Python package project from a ' 47 'Python package project template.'), 48 long_description=readme, 49 long_description_content_type='text/markdown', 50 author='Audrey Roy', 51 author_email='[email protected]', 52 url='https://github.com/cookiecutter/cookiecutter', 53 packages=[ 54 'cookiecutter', 55 ], 56 package_dir={'cookiecutter': 'cookiecutter'}, 57 entry_points={ 58 'console_scripts': [ 59 'cookiecutter = cookiecutter.__main__:main', 60 ] 61 }, 62 include_package_data=True, 63 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*', 64 install_requires=requirements, 65 extras_require={ 66 ':python_version<"3.3"': ['whichcraft>=0.4.0'], 67 }, 68 license='BSD', 69 zip_safe=False, 70 classifiers=[ 71 'Development Status :: 5 - Production/Stable', 72 'Environment :: Console', 73 'Intended Audience :: Developers', 74 'Natural Language :: English', 75 'License :: OSI Approved :: BSD License', 76 'Programming Language :: Python', 77 'Programming Language :: Python :: 2', 78 'Programming Language :: Python :: 2.7', 79 'Programming Language :: Python :: 3', 80 'Programming Language :: Python :: 3.5', 81 'Programming Language :: Python :: 3.6', 82 'Programming Language :: Python :: 3.7', 83 'Programming Language :: Python :: Implementation :: CPython', 84 'Programming Language :: Python :: Implementation :: PyPy', 85 'Topic :: Software Development', 86 ], 87 keywords=( 88 'cookiecutter, Python, projects, project templates, Jinja2, ' 89 'skeleton, scaffolding, project directory, setup.py, package, ' 90 'packaging' 91 ), 92 ) 93 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -68,21 +68,22 @@ license='BSD', zip_safe=False, classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Intended Audience :: Developers', - 'Natural Language :: English', - 'License :: OSI Approved :: BSD License', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: Implementation :: CPython', - 'Programming Language :: Python :: Implementation :: PyPy', - 'Topic :: Software Development', + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: Developers", + "Natural Language :: English", + "License :: OSI Approved :: BSD License", + "Programming Language :: Python", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Topic :: Software Development", ], keywords=( 'cookiecutter, Python, projects, project templates, Jinja2, '
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,21 +68,22 @@\n license='BSD',\n zip_safe=False,\n classifiers=[\n- 'Development Status :: 5 - Production/Stable',\n- 'Environment :: Console',\n- 'Intended Audience :: Developers',\n- 'Natural Language :: English',\n- 'License :: OSI Approved :: BSD License',\n- 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.7',\n- 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.5',\n- 'Programming Language :: Python :: 3.6',\n- 'Programming Language :: Python :: 3.7',\n- 'Programming Language :: Python :: Implementation :: CPython',\n- 'Programming Language :: Python :: Implementation :: PyPy',\n- 'Topic :: Software Development',\n+ \"Development Status :: 5 - Production/Stable\",\n+ \"Environment :: Console\",\n+ \"Intended Audience :: Developers\",\n+ \"Natural Language :: English\",\n+ \"License :: OSI Approved :: BSD License\",\n+ \"Programming Language :: Python\",\n+ \"Programming Language :: Python :: 2\",\n+ \"Programming Language :: Python :: 2.7\",\n+ \"Programming Language :: Python :: 3\",\n+ \"Programming Language :: Python :: 3.5\",\n+ \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: Implementation :: CPython\",\n+ \"Programming Language :: Python :: Implementation :: PyPy\",\n+ \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n", "issue": "Add support of python 3.8\n- [x] Add to travis\r\n- [x] Add to appveyor\r\n- [x] Add to tox\r\n- [x] Add to setup.py\r\n- [x] Add to docs\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=7.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0',\n 'requests>=2.18.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={\n ':python_version<\"3.3\"': ['whichcraft>=0.4.0'],\n },\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]}
1,457
412
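The code change itself is limited to advertising 3.8 support in the trove classifiers; the other checklist items (Travis, AppVeyor, tox, docs) live outside `setup.py`. An illustrative fragment of the classifier list with the added entry:

```python
# Illustrative fragment of setup() metadata; only the 3.8 classifier is new in the fix.
classifiers = [
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",  # entry added for 3.8 support
]
```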
gh_patches_debug_23369
rasdani/github-patches
git_diff
web2py__web2py-1294
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> empty strings stored as NULL in db https://groups.google.com/forum/#!topic/web2py/IYzhhd3eKXQ </issue> <code> [start of gluon/dal.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 | This file is part of the web2py Web Framework 6 | Copyrighted by Massimo Di Pierro <[email protected]> 7 | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 8 9 Takes care of adapting pyDAL to web2py's needs 10 ----------------------------------------------- 11 """ 12 13 from pydal import DAL as DAL 14 from pydal import Field 15 from pydal.objects import Row, Rows, Table, Query, Set, Expression 16 from pydal import SQLCustomType, geoPoint, geoLine, geoPolygon 17 18 19 def _default_validators(db, field): 20 """ 21 Field type validation, using web2py's validators mechanism. 22 23 makes sure the content of a field is in line with the declared 24 fieldtype 25 """ 26 from gluon import validators 27 field_type, field_length = field.type, field.length 28 requires = [] 29 30 if field_type in (('string', 'text', 'password')): 31 requires.append(validators.IS_LENGTH(field_length)) 32 elif field_type == 'json': 33 requires.append(validators.IS_EMPTY_OR(validators.IS_JSON())) 34 elif field_type == 'double' or field_type == 'float': 35 requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100)) 36 elif field_type == 'integer': 37 requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31)) 38 elif field_type == 'bigint': 39 requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63)) 40 elif field_type.startswith('decimal'): 41 requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10)) 42 elif field_type == 'date': 43 requires.append(validators.IS_DATE()) 44 elif field_type == 'time': 45 requires.append(validators.IS_TIME()) 46 elif field_type == 'datetime': 47 requires.append(validators.IS_DATETIME()) 48 elif db and field_type.startswith('reference') and \ 49 field_type.find('.') < 0 and \ 50 field_type[10:] in db.tables: 51 referenced = db[field_type[10:]] 52 if hasattr(referenced, '_format') and referenced._format: 53 requires = validators.IS_IN_DB(db, referenced._id, 54 referenced._format) 55 if field.unique: 56 requires._and = validators.IS_NOT_IN_DB(db, field) 57 if field.tablename == field_type[10:]: 58 return validators.IS_EMPTY_OR(requires) 59 return requires 60 elif db and field_type.startswith('list:reference') and \ 61 field_type.find('.') < 0 and \ 62 field_type[15:] in db.tables: 63 referenced = db[field_type[15:]] 64 if hasattr(referenced, '_format') and referenced._format: 65 requires = validators.IS_IN_DB(db, referenced._id, 66 referenced._format, multiple=True) 67 else: 68 requires = validators.IS_IN_DB(db, referenced._id, 69 multiple=True) 70 if field.unique: 71 requires._and = validators.IS_NOT_IN_DB(db, field) 72 if not field.notnull: 73 requires = validators.IS_EMPTY_OR(requires) 74 return requires 75 # does not get here for reference and list:reference 76 if field.unique: 77 requires.insert(0,validators.IS_NOT_IN_DB(db, field)) 78 excluded_fields = ['string','upload','text','password','boolean'] 79 if (field.notnull or field.unique) and not field_type in excluded_fields: 80 requires.insert(0,validators.IS_NOT_EMPTY()) 81 elif not field.notnull and not field.unique and requires: 82 requires[0] = validators.IS_EMPTY_OR(requires[0]) 83 return requires 84 85 from gluon.serializers import custom_json, xml 86 from gluon.utils import web2py_uuid 87 from gluon import sqlhtml 88 
89 90 DAL.serializers = {'json': custom_json, 'xml': xml} 91 DAL.validators_method = _default_validators 92 DAL.uuid = lambda x: web2py_uuid() 93 DAL.representers = { 94 'rows_render': sqlhtml.represent, 95 'rows_xml': sqlhtml.SQLTABLE 96 } 97 DAL.Field = Field 98 DAL.Table = Table 99 100 #: add web2py contrib drivers to pyDAL 101 from pydal.drivers import DRIVERS 102 if not DRIVERS.get('pymysql'): 103 try: 104 from .contrib import pymysql 105 DRIVERS['pymysql'] = pymysql 106 except: 107 pass 108 if not DRIVERS.get('pyodbc'): 109 try: 110 from .contrib import pypyodbc as pyodbc 111 DRIVERS['pyodbc'] = pyodbc 112 except: 113 pass 114 if not DRIVERS.get('pg8000'): 115 try: 116 from .contrib import pg8000 117 DRIVERS['pg8000'] = pg8000 118 except: 119 pass 120 [end of gluon/dal.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gluon/dal.py b/gluon/dal.py --- a/gluon/dal.py +++ b/gluon/dal.py @@ -74,12 +74,12 @@ return requires # does not get here for reference and list:reference if field.unique: - requires.insert(0,validators.IS_NOT_IN_DB(db, field)) - excluded_fields = ['string','upload','text','password','boolean'] + requires.insert(0, validators.IS_NOT_IN_DB(db, field)) + excluded_fields = ['string', 'upload', 'text', 'password', 'boolean'] if (field.notnull or field.unique) and not field_type in excluded_fields: - requires.insert(0,validators.IS_NOT_EMPTY()) + requires.insert(0, validators.IS_NOT_EMPTY()) elif not field.notnull and not field.unique and requires: - requires[0] = validators.IS_EMPTY_OR(requires[0]) + requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None) return requires from gluon.serializers import custom_json, xml @@ -93,7 +93,7 @@ DAL.representers = { 'rows_render': sqlhtml.represent, 'rows_xml': sqlhtml.SQLTABLE - } +} DAL.Field = Field DAL.Table = Table
{"golden_diff": "diff --git a/gluon/dal.py b/gluon/dal.py\n--- a/gluon/dal.py\n+++ b/gluon/dal.py\n@@ -74,12 +74,12 @@\n return requires\n # does not get here for reference and list:reference\n if field.unique:\n- requires.insert(0,validators.IS_NOT_IN_DB(db, field))\n- excluded_fields = ['string','upload','text','password','boolean']\n+ requires.insert(0, validators.IS_NOT_IN_DB(db, field))\n+ excluded_fields = ['string', 'upload', 'text', 'password', 'boolean']\n if (field.notnull or field.unique) and not field_type in excluded_fields:\n- requires.insert(0,validators.IS_NOT_EMPTY())\n+ requires.insert(0, validators.IS_NOT_EMPTY())\n elif not field.notnull and not field.unique and requires:\n- requires[0] = validators.IS_EMPTY_OR(requires[0])\n+ requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None)\n return requires\n \n from gluon.serializers import custom_json, xml\n@@ -93,7 +93,7 @@\n DAL.representers = {\n 'rows_render': sqlhtml.represent,\n 'rows_xml': sqlhtml.SQLTABLE\n- }\n+}\n DAL.Field = Field\n DAL.Table = Table\n", "issue": "empty strings stored as NULL in db\nhttps://groups.google.com/forum/#!topic/web2py/IYzhhd3eKXQ\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nTakes care of adapting pyDAL to web2py's needs\n-----------------------------------------------\n\"\"\"\n\nfrom pydal import DAL as DAL\nfrom pydal import Field\nfrom pydal.objects import Row, Rows, Table, Query, Set, Expression\nfrom pydal import SQLCustomType, geoPoint, geoLine, geoPolygon\n\n\ndef _default_validators(db, field):\n \"\"\"\n Field type validation, using web2py's validators mechanism.\n\n makes sure the content of a field is in line with the declared\n fieldtype\n \"\"\"\n from gluon import validators\n field_type, field_length = field.type, field.length\n requires = []\n\n if field_type in (('string', 'text', 'password')):\n requires.append(validators.IS_LENGTH(field_length))\n elif field_type == 'json':\n requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))\n elif field_type == 'double' or field_type == 'float':\n requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))\n elif field_type == 'integer':\n requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))\n elif field_type == 'bigint':\n requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))\n elif field_type.startswith('decimal'):\n requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))\n elif field_type == 'date':\n requires.append(validators.IS_DATE())\n elif field_type == 'time':\n requires.append(validators.IS_TIME())\n elif field_type == 'datetime':\n requires.append(validators.IS_DATETIME())\n elif db and field_type.startswith('reference') and \\\n field_type.find('.') < 0 and \\\n field_type[10:] in db.tables:\n referenced = db[field_type[10:]]\n if hasattr(referenced, '_format') and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if field.tablename == field_type[10:]:\n return validators.IS_EMPTY_OR(requires)\n return requires\n elif db and field_type.startswith('list:reference') and \\\n field_type.find('.') < 0 and \\\n field_type[15:] in db.tables:\n referenced = db[field_type[15:]]\n if hasattr(referenced, '_format') 
and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format, multiple=True)\n else:\n requires = validators.IS_IN_DB(db, referenced._id,\n multiple=True)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if not field.notnull:\n requires = validators.IS_EMPTY_OR(requires)\n return requires\n # does not get here for reference and list:reference\n if field.unique:\n requires.insert(0,validators.IS_NOT_IN_DB(db, field))\n excluded_fields = ['string','upload','text','password','boolean']\n if (field.notnull or field.unique) and not field_type in excluded_fields:\n requires.insert(0,validators.IS_NOT_EMPTY())\n elif not field.notnull and not field.unique and requires:\n requires[0] = validators.IS_EMPTY_OR(requires[0])\n return requires\n\nfrom gluon.serializers import custom_json, xml\nfrom gluon.utils import web2py_uuid\nfrom gluon import sqlhtml\n\n\nDAL.serializers = {'json': custom_json, 'xml': xml}\nDAL.validators_method = _default_validators\nDAL.uuid = lambda x: web2py_uuid()\nDAL.representers = {\n 'rows_render': sqlhtml.represent,\n 'rows_xml': sqlhtml.SQLTABLE\n }\nDAL.Field = Field\nDAL.Table = Table\n\n#: add web2py contrib drivers to pyDAL\nfrom pydal.drivers import DRIVERS\nif not DRIVERS.get('pymysql'):\n try:\n from .contrib import pymysql\n DRIVERS['pymysql'] = pymysql\n except:\n pass\nif not DRIVERS.get('pyodbc'):\n try:\n from .contrib import pypyodbc as pyodbc\n DRIVERS['pyodbc'] = pyodbc\n except:\n pass\nif not DRIVERS.get('pg8000'):\n try:\n from .contrib import pg8000\n DRIVERS['pg8000'] = pg8000\n except:\n pass\n", "path": "gluon/dal.py"}]}
1,874
313
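The fix threads a `null=''` argument through `IS_EMPTY_OR` for string-like fields so an empty submission is stored as an empty string rather than SQL NULL. A small sketch of that validator behaviour, assuming web2py's `gluon` package is importable:

```python
from gluon import validators

# For string-like fields, keep '' (not None/NULL) when the input is empty.
requires = validators.IS_EMPTY_OR(validators.IS_LENGTH(512), null='')

value, error = requires('')   # expected: ('', None) — empty input passes through as ''
```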
gh_patches_debug_47493
rasdani/github-patches
git_diff
geopandas__geopandas-2249
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DOC: Address GeoPandas op deprecation in docs While working on #2211 I noticed instances of the `op` parameter still being used. This `op` parameter was deprecated in pull request #1626 in favour of `predicate`. Locations where op is still present includes: * [sjoin benchmark](https://github.com/geopandas/geopandas/blob/master/benchmarks/sjoin.py) * [Spatial Joins notebook](https://github.com/geopandas/geopandas/blob/master/doc/source/gallery/spatial_joins.ipynb) I can address the notebook instance but I don't know what the benchmark instance of `op` does so wouldn't want to change it without a thumbs up from a maintainer. </issue> <code> [start of benchmarks/sjoin.py] 1 import random 2 3 from geopandas import GeoDataFrame, GeoSeries, sjoin 4 from shapely.geometry import Point, LineString, Polygon 5 import numpy as np 6 7 8 class Bench: 9 10 param_names = ['op'] 11 params = [('intersects', 'contains', 'within')] 12 13 def setup(self, *args): 14 triangles = GeoSeries( 15 [Polygon([(random.random(), random.random()) for _ in range(3)]) 16 for _ in range(1000)]) 17 18 points = GeoSeries( 19 [Point(x, y) for x, y in zip(np.random.random(10000), 20 np.random.random(10000))]) 21 22 df1 = GeoDataFrame({'val1': np.random.randn(len(triangles)), 23 'geometry': triangles}) 24 df2 = GeoDataFrame({'val1': np.random.randn(len(points)), 25 'geometry': points}) 26 27 self.df1, self.df2 = df1, df2 28 29 def time_sjoin(self, op): 30 sjoin(self.df1, self.df2, op=op) 31 [end of benchmarks/sjoin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benchmarks/sjoin.py b/benchmarks/sjoin.py --- a/benchmarks/sjoin.py +++ b/benchmarks/sjoin.py @@ -26,5 +26,5 @@ self.df1, self.df2 = df1, df2 - def time_sjoin(self, op): - sjoin(self.df1, self.df2, op=op) + def time_sjoin(self, predicate): + sjoin(self.df1, self.df2, predicate=predicate)
{"golden_diff": "diff --git a/benchmarks/sjoin.py b/benchmarks/sjoin.py\n--- a/benchmarks/sjoin.py\n+++ b/benchmarks/sjoin.py\n@@ -26,5 +26,5 @@\n \n self.df1, self.df2 = df1, df2\n \n- def time_sjoin(self, op):\n- sjoin(self.df1, self.df2, op=op)\n+ def time_sjoin(self, predicate):\n+ sjoin(self.df1, self.df2, predicate=predicate)\n", "issue": "DOC: Address GeoPandas op deprecation in docs\nWhile working on #2211 I noticed instances of the `op` parameter still being used.\r\n\r\nThis `op` parameter was deprecated in pull request #1626 in favour of `predicate`.\r\n\r\nLocations where op is still present includes:\r\n* [sjoin benchmark](https://github.com/geopandas/geopandas/blob/master/benchmarks/sjoin.py)\r\n* [Spatial Joins notebook](https://github.com/geopandas/geopandas/blob/master/doc/source/gallery/spatial_joins.ipynb)\r\n \r\nI can address the notebook instance but I don't know what the benchmark instance of `op` does so wouldn't want to change it without a thumbs up from a maintainer.\n", "before_files": [{"content": "import random\n\nfrom geopandas import GeoDataFrame, GeoSeries, sjoin\nfrom shapely.geometry import Point, LineString, Polygon\nimport numpy as np\n\n\nclass Bench:\n\n param_names = ['op']\n params = [('intersects', 'contains', 'within')]\n\n def setup(self, *args):\n triangles = GeoSeries(\n [Polygon([(random.random(), random.random()) for _ in range(3)])\n for _ in range(1000)])\n\n points = GeoSeries(\n [Point(x, y) for x, y in zip(np.random.random(10000),\n np.random.random(10000))])\n\n df1 = GeoDataFrame({'val1': np.random.randn(len(triangles)),\n 'geometry': triangles})\n df2 = GeoDataFrame({'val1': np.random.randn(len(points)),\n 'geometry': points})\n\n self.df1, self.df2 = df1, df2\n\n def time_sjoin(self, op):\n sjoin(self.df1, self.df2, op=op)\n", "path": "benchmarks/sjoin.py"}]}
977
117
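The fix in this record is a plain rename at the call site. A minimal sketch of the old and new spellings of a spatial join, assuming a recent GeoPandas release; the tiny GeoDataFrames here are illustrative and not part of the benchmark:

```python
import geopandas
from shapely.geometry import Point, Polygon

points = geopandas.GeoDataFrame(geometry=[Point(0.2, 0.2), Point(2.0, 2.0)])
triangles = geopandas.GeoDataFrame(geometry=[Polygon([(0, 0), (1, 0), (0, 1)])])

# Deprecated spelling the issue asks to remove:
# joined = geopandas.sjoin(points, triangles, op="within")

# Current spelling, matching the golden diff:
joined = geopandas.sjoin(points, triangles, predicate="within")
print(len(joined))
```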
gh_patches_debug_30426
rasdani/github-patches
git_diff
sunpy__sunpy-2658
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Figure tests directory gets wiped after running via the test runner. Just discovered that when I run: ```bash python setup.py test --figure-only ``` The resulting figure images are saved in a tmp directory that gets wiped at the end of the process (this is under linux for me) Using `pytest` adds the folder into my sunpy folder. </issue> <code> [start of sunpy/conftest.py] 1 from __future__ import absolute_import, print_function 2 from functools import partial 3 4 import os 5 import tempfile 6 import json 7 8 # Force MPL to use non-gui backends for testing. 9 try: 10 import matplotlib 11 except ImportError: 12 pass 13 else: 14 matplotlib.use('Agg') 15 16 from sunpy.tests.hash import HASH_LIBRARY_NAME 17 from sunpy.tests.helpers import new_hash_library, test_fig_dir 18 from sunpy.extern import six 19 20 import pytest 21 22 23 # Don't actually import pytest_remotedata because that can do things to the 24 # entrypoints code in pytest. 25 if six.PY2: 26 import imp 27 try: 28 imp.find_module('pytest_remotedata') 29 HAVE_REMOTEDATA = True 30 except ImportError: 31 HAVE_REMOTEDATA = False 32 else: 33 import importlib 34 remotedata_spec = importlib.util.find_spec("pytest_remotedata") 35 HAVE_REMOTEDATA = remotedata_spec is not None 36 37 38 def pytest_runtest_setup(item): 39 """ 40 pytest hook to skip all tests that have the mark 'online' if the 41 client is online (simply detected by checking whether http://www.google.com 42 can be requested). 43 """ 44 if isinstance(item, item.Function): 45 if 'remote_data' in item.keywords and not HAVE_REMOTEDATA: 46 pytest.skip("skipping remotedata tests as pytest-remotedata is not installed") 47 48 49 def pytest_unconfigure(config): 50 if len(new_hash_library) > 0: 51 # Write the new hash library in JSON 52 hashfile = os.path.join(test_fig_dir, HASH_LIBRARY_NAME) 53 with open(hashfile, 'w') as outfile: 54 json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': ')) 55 56 print('All images from image tests can be found in {0}'.format(test_fig_dir)) 57 print("The corresponding hash library is {0}".format(hashfile)) 58 [end of sunpy/conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sunpy/conftest.py b/sunpy/conftest.py --- a/sunpy/conftest.py +++ b/sunpy/conftest.py @@ -13,8 +13,9 @@ else: matplotlib.use('Agg') +import sunpy.tests.helpers from sunpy.tests.hash import HASH_LIBRARY_NAME -from sunpy.tests.helpers import new_hash_library, test_fig_dir +from sunpy.tests.helpers import new_hash_library from sunpy.extern import six import pytest @@ -35,6 +36,15 @@ HAVE_REMOTEDATA = remotedata_spec is not None +def pytest_addoption(parser): + parser.addoption("--figure_dir", action="store", default="./figure_test_images") + + [email protected](scope='session', autouse=True) +def figure_base_dir(request): + sunpy.tests.helpers.figure_base_dir = request.config.getoption("--figure_dir") + + def pytest_runtest_setup(item): """ pytest hook to skip all tests that have the mark 'online' if the @@ -49,9 +59,10 @@ def pytest_unconfigure(config): if len(new_hash_library) > 0: # Write the new hash library in JSON - hashfile = os.path.join(test_fig_dir, HASH_LIBRARY_NAME) + figure_base_dir = os.path.abspath(config.getoption("--figure_dir")) + hashfile = os.path.join(figure_base_dir, HASH_LIBRARY_NAME) with open(hashfile, 'w') as outfile: json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': ')) - print('All images from image tests can be found in {0}'.format(test_fig_dir)) + print('All images from image tests can be found in {0}'.format(figure_base_dir)) print("The corresponding hash library is {0}".format(hashfile))
{"golden_diff": "diff --git a/sunpy/conftest.py b/sunpy/conftest.py\n--- a/sunpy/conftest.py\n+++ b/sunpy/conftest.py\n@@ -13,8 +13,9 @@\n else:\n matplotlib.use('Agg')\n \n+import sunpy.tests.helpers\n from sunpy.tests.hash import HASH_LIBRARY_NAME\n-from sunpy.tests.helpers import new_hash_library, test_fig_dir\n+from sunpy.tests.helpers import new_hash_library\n from sunpy.extern import six\n \n import pytest\n@@ -35,6 +36,15 @@\n HAVE_REMOTEDATA = remotedata_spec is not None\n \n \n+def pytest_addoption(parser):\n+ parser.addoption(\"--figure_dir\", action=\"store\", default=\"./figure_test_images\")\n+\n+\[email protected](scope='session', autouse=True)\n+def figure_base_dir(request):\n+ sunpy.tests.helpers.figure_base_dir = request.config.getoption(\"--figure_dir\")\n+\n+\n def pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'online' if the\n@@ -49,9 +59,10 @@\n def pytest_unconfigure(config):\n if len(new_hash_library) > 0:\n # Write the new hash library in JSON\n- hashfile = os.path.join(test_fig_dir, HASH_LIBRARY_NAME)\n+ figure_base_dir = os.path.abspath(config.getoption(\"--figure_dir\"))\n+ hashfile = os.path.join(figure_base_dir, HASH_LIBRARY_NAME)\n with open(hashfile, 'w') as outfile:\n json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n \n- print('All images from image tests can be found in {0}'.format(test_fig_dir))\n+ print('All images from image tests can be found in {0}'.format(figure_base_dir))\n print(\"The corresponding hash library is {0}\".format(hashfile))\n", "issue": "Figure tests directory gets wiped after running via the test runner.\nJust discovered that when I run:\r\n```bash\r\npython setup.py test --figure-only\r\n```\r\nThe resulting figure images are saved in a tmp directory that gets wiped at the end of the process (this is under linux for me)\r\n\r\nUsing `pytest` adds the folder into my sunpy folder.\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\nfrom functools import partial\n\nimport os\nimport tempfile\nimport json\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\nfrom sunpy.tests.hash import HASH_LIBRARY_NAME\nfrom sunpy.tests.helpers import new_hash_library, test_fig_dir\nfrom sunpy.extern import six\n\nimport pytest\n\n\n# Don't actually import pytest_remotedata because that can do things to the\n# entrypoints code in pytest.\nif six.PY2:\n import imp\n try:\n imp.find_module('pytest_remotedata')\n HAVE_REMOTEDATA = True\n except ImportError:\n HAVE_REMOTEDATA = False\nelse:\n import importlib\n remotedata_spec = importlib.util.find_spec(\"pytest_remotedata\")\n HAVE_REMOTEDATA = remotedata_spec is not None\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'online' if the\n client is online (simply detected by checking whether http://www.google.com\n can be requested).\n \"\"\"\n if isinstance(item, item.Function):\n if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:\n pytest.skip(\"skipping remotedata tests as pytest-remotedata is not installed\")\n\n\ndef pytest_unconfigure(config):\n if len(new_hash_library) > 0:\n # Write the new hash library in JSON\n hashfile = os.path.join(test_fig_dir, HASH_LIBRARY_NAME)\n with open(hashfile, 'w') as outfile:\n json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n\n print('All images from image tests 
can be found in {0}'.format(test_fig_dir))\n print(\"The corresponding hash library is {0}\".format(hashfile))\n", "path": "sunpy/conftest.py"}]}
1,141
420
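The golden diff above works by letting the test run choose where figure images land instead of a throwaway temporary directory. A minimal conftest.py sketch of that pattern, with the option name taken from the diff and the SunPy helper module left out:

```python
# conftest.py (sketch)
import os
import pytest


def pytest_addoption(parser):
    parser.addoption("--figure_dir", action="store", default="./figure_test_images")


@pytest.fixture(scope="session", autouse=True)
def figure_base_dir(request):
    # Resolve once per session; figure-comparison helpers can read this path.
    path = os.path.abspath(request.config.getoption("--figure_dir"))
    os.makedirs(path, exist_ok=True)
    return path
```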
gh_patches_debug_27894
rasdani/github-patches
git_diff
Gallopsled__pwntools-1864
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "pwn template" emits a Python2 shebang We should probably update this to use Python3 explicitly, since that's what we recommend. </issue> <code> [start of pwnlib/commandline/template.py] 1 #!/usr/bin/env python2 2 from __future__ import absolute_import 3 from __future__ import division 4 5 import re 6 7 from pwn import * 8 from pwnlib.commandline import common 9 10 from mako.lookup import TemplateLookup 11 12 parser = common.parser_commands.add_parser( 13 'template', 14 help = 'Generate an exploit template', 15 description = 'Generate an exploit template' 16 ) 17 18 parser.add_argument('exe', nargs='?', help='Target binary') 19 parser.add_argument('--host', help='Remote host / SSH server') 20 parser.add_argument('--port', help='Remote port / SSH port', type=int) 21 parser.add_argument('--user', help='SSH Username') 22 parser.add_argument('--pass', '--password', help='SSH Password', dest='password') 23 parser.add_argument('--path', help='Remote path of file on SSH server') 24 parser.add_argument('--quiet', help='Less verbose template comments', action='store_true') 25 parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto') 26 27 def main(args): 28 cache = None 29 30 if cache: 31 cache = os.path.join(context.cache_dir, 'mako') 32 33 lookup = TemplateLookup( 34 directories = [os.path.join(pwnlib.data.path, 'templates')], 35 module_directory = cache 36 ) 37 38 # For the SSH scenario, check that the binary is at the 39 # same path on the remote host. 40 if args.user: 41 if not (args.path or args.exe): 42 log.error("Must specify --path or a exe") 43 44 s = ssh(args.user, args.host, args.port or 22, args.password or None) 45 46 try: 47 remote = args.path or args.exe 48 s.download(remote) 49 except Exception: 50 log.warning("Could not download file %r, opening a shell", remote) 51 s.interactive() 52 return 53 54 if not args.exe: 55 args.exe = os.path.basename(args.path) 56 57 template = lookup.get_template('pwnup.mako') 58 output = template.render(args.exe, 59 args.host, 60 args.port, 61 args.user, 62 args.password, 63 args.path, 64 args.quiet) 65 66 # Fix Mako formatting bs 67 output = re.sub('\n\n\n', '\n\n', output) 68 69 # Colorize the output if it's a TTY 70 if args.color == 'always' or (args.color == 'auto' and sys.stdout.isatty()): 71 from pygments import highlight 72 from pygments.formatters import TerminalFormatter 73 from pygments.lexers.python import PythonLexer 74 output = highlight(output, PythonLexer(), TerminalFormatter()) 75 76 print(output) 77 78 # If redirected to a file, make the resulting script executable 79 if not sys.stdout.isatty(): 80 try: os.fchmod(sys.stdout.fileno(), 0o700) 81 except OSError: pass 82 83 if __name__ == '__main__': 84 pwnlib.commandline.common.main(__file__) 85 [end of pwnlib/commandline/template.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwnlib/commandline/template.py b/pwnlib/commandline/template.py old mode 100644 new mode 100755 --- a/pwnlib/commandline/template.py +++ b/pwnlib/commandline/template.py @@ -2,8 +2,6 @@ from __future__ import absolute_import from __future__ import division -import re - from pwn import * from pwnlib.commandline import common @@ -25,14 +23,9 @@ parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto') def main(args): - cache = None - - if cache: - cache = os.path.join(context.cache_dir, 'mako') - lookup = TemplateLookup( directories = [os.path.join(pwnlib.data.path, 'templates')], - module_directory = cache + module_directory = None ) # For the SSH scenario, check that the binary is at the @@ -44,10 +37,10 @@ s = ssh(args.user, args.host, args.port or 22, args.password or None) try: - remote = args.path or args.exe - s.download(remote) + remote_file = args.path or args.exe + s.download(remote_file) except Exception: - log.warning("Could not download file %r, opening a shell", remote) + log.warning("Could not download file %r, opening a shell", remote_file) s.interactive() return
{"golden_diff": "diff --git a/pwnlib/commandline/template.py b/pwnlib/commandline/template.py\nold mode 100644\nnew mode 100755\n--- a/pwnlib/commandline/template.py\n+++ b/pwnlib/commandline/template.py\n@@ -2,8 +2,6 @@\n from __future__ import absolute_import\n from __future__ import division\n \n-import re\n-\n from pwn import *\n from pwnlib.commandline import common\n \n@@ -25,14 +23,9 @@\n parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')\n \n def main(args):\n- cache = None\n-\n- if cache:\n- cache = os.path.join(context.cache_dir, 'mako')\n-\n lookup = TemplateLookup(\n directories = [os.path.join(pwnlib.data.path, 'templates')],\n- module_directory = cache\n+ module_directory = None\n )\n \n # For the SSH scenario, check that the binary is at the\n@@ -44,10 +37,10 @@\n s = ssh(args.user, args.host, args.port or 22, args.password or None)\n \n try:\n- remote = args.path or args.exe\n- s.download(remote)\n+ remote_file = args.path or args.exe\n+ s.download(remote_file)\n except Exception:\n- log.warning(\"Could not download file %r, opening a shell\", remote)\n+ log.warning(\"Could not download file %r, opening a shell\", remote_file)\n s.interactive()\n return\n", "issue": "\"pwn template\" emits a Python2 shebang\nWe should probably update this to use Python3 explicitly, since that's what we recommend.\n", "before_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport re\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nfrom mako.lookup import TemplateLookup\n\nparser = common.parser_commands.add_parser(\n 'template',\n help = 'Generate an exploit template',\n description = 'Generate an exploit template'\n)\n\nparser.add_argument('exe', nargs='?', help='Target binary')\nparser.add_argument('--host', help='Remote host / SSH server')\nparser.add_argument('--port', help='Remote port / SSH port', type=int)\nparser.add_argument('--user', help='SSH Username')\nparser.add_argument('--pass', '--password', help='SSH Password', dest='password')\nparser.add_argument('--path', help='Remote path of file on SSH server')\nparser.add_argument('--quiet', help='Less verbose template comments', action='store_true')\nparser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')\n\ndef main(args):\n cache = None\n\n if cache:\n cache = os.path.join(context.cache_dir, 'mako')\n\n lookup = TemplateLookup(\n directories = [os.path.join(pwnlib.data.path, 'templates')],\n module_directory = cache\n )\n\n # For the SSH scenario, check that the binary is at the\n # same path on the remote host.\n if args.user:\n if not (args.path or args.exe):\n log.error(\"Must specify --path or a exe\")\n\n s = ssh(args.user, args.host, args.port or 22, args.password or None)\n\n try:\n remote = args.path or args.exe\n s.download(remote)\n except Exception:\n log.warning(\"Could not download file %r, opening a shell\", remote)\n s.interactive()\n return\n\n if not args.exe:\n args.exe = os.path.basename(args.path)\n\n template = lookup.get_template('pwnup.mako')\n output = template.render(args.exe,\n args.host,\n args.port,\n args.user,\n args.password,\n args.path,\n args.quiet)\n\n # Fix Mako formatting bs\n output = re.sub('\\n\\n\\n', '\\n\\n', output)\n\n # Colorize the output if it's a TTY\n if args.color == 'always' or (args.color == 'auto' and sys.stdout.isatty()):\n from pygments import highlight\n from 
pygments.formatters import TerminalFormatter\n from pygments.lexers.python import PythonLexer\n output = highlight(output, PythonLexer(), TerminalFormatter())\n\n print(output)\n\n # If redirected to a file, make the resulting script executable\n if not sys.stdout.isatty():\n try: os.fchmod(sys.stdout.fileno(), 0o700)\n except OSError: pass\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/template.py"}]}
1,371
356
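Beyond the shebang complaint in the issue, the golden diff stops writing a Mako module cache to disk. A small sketch of that lookup configuration; the template directory here is illustrative, standing in for pwnlib.data.path:

```python
import os
from mako.lookup import TemplateLookup

template_dir = os.path.join("pwnlib", "data", "templates")  # illustrative path

lookup = TemplateLookup(
    directories=[template_dir],
    module_directory=None,  # no on-disk cache, as in the golden diff
)
template = lookup.get_template("pwnup.mako")
```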
gh_patches_debug_15484
rasdani/github-patches
git_diff
pypi__warehouse-3239
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Legacy and Warehouse RSS feeds differ @andrew asked in https://github.com/librariesio/libraries.io/issues/2024#issuecomment-372638824 about Warehouse's RSS feeds: > * https://pypi.org/rss/updates.xml > * https://pypi.org/rss/packages.xml > Which I expected the contents to match the old ones but currently don't: > * https://pypi.python.org/pypi?%3Aaction=rss > * https://pypi.python.org/pypi?%3Aaction=packages_rss I've verified through visual inspection that the data in the legacy RSS feed and the data in the Warehouse RSS feeds differ. This is a bug in the feeds or a bug in the docs. Currently our [feeds documentation](https://warehouse.readthedocs.io/api-reference/feeds/) and [Warehouse migration guide](https://warehouse.readthedocs.io/api-reference/integration-guide/) don't say anything about deliberate differences between the legacy and Warehouse RSS feeds. We can update the docs if there's a deliberate reason for the difference. </issue> <code> [start of warehouse/rss/views.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 from pyramid.view import view_config 14 from sqlalchemy.orm import joinedload 15 16 from warehouse.cache.origin import origin_cache 17 from warehouse.packaging.models import Project, Release 18 from warehouse.xml import XML_CSP 19 20 21 @view_config( 22 route_name="rss.updates", 23 renderer="rss/updates.xml", 24 decorator=[ 25 origin_cache( 26 1 * 24 * 60 * 60, # 1 day 27 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day 28 stale_if_error=5 * 24 * 60 * 60, # 5 days 29 ), 30 ], 31 ) 32 def rss_updates(request): 33 request.response.content_type = "text/xml" 34 35 request.find_service(name="csp").merge(XML_CSP) 36 37 latest_releases = ( 38 request.db.query(Release) 39 .options(joinedload(Release.project)) 40 .order_by(Release.created.desc()) 41 .limit(40) 42 .all() 43 ) 44 45 return {"latest_releases": latest_releases} 46 47 48 @view_config( 49 route_name="rss.packages", 50 renderer="rss/packages.xml", 51 decorator=[ 52 origin_cache( 53 1 * 24 * 60 * 60, # 1 day 54 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day 55 stale_if_error=5 * 24 * 60 * 60, # 5 days 56 ), 57 ], 58 ) 59 def rss_packages(request): 60 request.response.content_type = "text/xml" 61 62 request.find_service(name="csp").merge(XML_CSP) 63 64 newest_projects = ( 65 request.db.query(Project) 66 .options(joinedload(Project.releases, innerjoin=True)) 67 .order_by(Project.created.desc()) 68 .limit(40) 69 .all() 70 ) 71 72 return {"newest_projects": newest_projects} 73 [end of warehouse/rss/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/rss/views.py b/warehouse/rss/views.py --- a/warehouse/rss/views.py +++ b/warehouse/rss/views.py @@ -26,6 +26,7 @@ 1 * 24 * 60 * 60, # 1 day stale_while_revalidate=1 * 24 * 60 * 60, # 1 day stale_if_error=5 * 24 * 60 * 60, # 5 days + keys=["all-projects"], ), ], ) @@ -53,6 +54,7 @@ 1 * 24 * 60 * 60, # 1 day stale_while_revalidate=1 * 24 * 60 * 60, # 1 day stale_if_error=5 * 24 * 60 * 60, # 5 days + keys=["all-projects"], ), ], )
{"golden_diff": "diff --git a/warehouse/rss/views.py b/warehouse/rss/views.py\n--- a/warehouse/rss/views.py\n+++ b/warehouse/rss/views.py\n@@ -26,6 +26,7 @@\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n+ keys=[\"all-projects\"],\n ),\n ],\n )\n@@ -53,6 +54,7 @@\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n+ keys=[\"all-projects\"],\n ),\n ],\n )\n", "issue": "Legacy and Warehouse RSS feeds differ\n@andrew asked in https://github.com/librariesio/libraries.io/issues/2024#issuecomment-372638824 about Warehouse's RSS feeds:\r\n\r\n> * https://pypi.org/rss/updates.xml\r\n> * https://pypi.org/rss/packages.xml\r\n\r\n> Which I expected the contents to match the old ones but currently don't:\r\n\r\n> * https://pypi.python.org/pypi?%3Aaction=rss\r\n> * https://pypi.python.org/pypi?%3Aaction=packages_rss\r\n\r\nI've verified through visual inspection that the data in the legacy RSS feed and the data in the Warehouse RSS feeds differ.\r\n\r\nThis is a bug in the feeds or a bug in the docs. Currently our [feeds documentation](https://warehouse.readthedocs.io/api-reference/feeds/) and [Warehouse migration guide](https://warehouse.readthedocs.io/api-reference/integration-guide/) don't say anything about deliberate differences between the legacy and Warehouse RSS feeds. We can update the docs if there's a deliberate reason for the difference.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.xml import XML_CSP\n\n\n@view_config(\n route_name=\"rss.updates\",\n renderer=\"rss/updates.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef rss_updates(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"latest_releases\": latest_releases}\n\n\n@view_config(\n route_name=\"rss.packages\",\n renderer=\"rss/packages.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef rss_packages(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n newest_projects = (\n request.db.query(Project)\n .options(joinedload(Project.releases, innerjoin=True))\n .order_by(Project.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"newest_projects\": newest_projects}\n", "path": "warehouse/rss/views.py"}]}
1,449
229
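The one-line change per view attaches a shared cache key to both feeds, which in Warehouse's caching setup should let their cached copies be refreshed together when project data changes. A sketch of one decorated view with that key, assuming Warehouse's origin_cache helper and Pyramid's view_config:

```python
from pyramid.view import view_config
from warehouse.cache.origin import origin_cache


@view_config(
    route_name="rss.updates",
    renderer="rss/updates.xml",
    decorator=[
        origin_cache(
            1 * 24 * 60 * 60,                        # 1 day
            stale_while_revalidate=1 * 24 * 60 * 60,
            stale_if_error=5 * 24 * 60 * 60,
            keys=["all-projects"],                   # shared key from the diff
        ),
    ],
)
def rss_updates(request):
    ...
```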
gh_patches_debug_34793
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-2571
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> azure - lower az log verbosity when running custodian commands without -v currently, c7n_azure will log info about the session it's using with normal c7n commands and is much more verbose than what it used to be, moved the log level from info to debug so it's still accessible with the -v flag ``` $ custodian schema network-addr.filters.shield-enabled -v 2018-06-19 09:42:36,028: cli.azure.cli.core:DEBUG Current cloud config: AzureCloud 2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Creating session with Azure CLI Authentication 2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Session using Subscription ID: xxxxxxxxxxxxxxxxxxxxxxxxxxx Help ---- The most base type Schema ------ { "additionalProperties": false, "required": [ "type" ], "type": "object", "properties": { "state": { "type": "boolean" }, "type": { "enum": [ "shield-enabled" ] } } } ``` </issue> <code> [start of tools/c7n_azure/c7n_azure/session.py] 1 # Copyright 2018 Capital One Services, LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import importlib 16 import os 17 import logging 18 from azure.cli.core.cloud import AZURE_PUBLIC_CLOUD 19 from azure.cli.core._profile import Profile 20 from azure.common.credentials import ServicePrincipalCredentials, BasicTokenAuthentication 21 from c7n_azure.utils import ResourceIdParser 22 23 24 class Session(object): 25 26 def __init__(self, subscription_id=None): 27 """ 28 Creates a session using available authentication type. 29 30 Auth priority: 31 1. Token Auth 32 2. Tenant Auth 33 3. Azure CLI Auth 34 35 :param subscription_id: If provided, overrides environment variables. 
36 """ 37 38 self.log = logging.getLogger('custodian.azure.session') 39 self._provider_cache = {} 40 41 tenant_auth_variables = [ 42 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID', 43 'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET' 44 ] 45 token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID'] 46 47 if all(k in os.environ for k in token_auth_variables): 48 # Token authentication 49 self.credentials = BasicTokenAuthentication( 50 token={ 51 'access_token': os.environ['AZURE_ACCESS_TOKEN'] 52 }) 53 self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID'] 54 self.log.info("Creating session with Token Authentication") 55 56 elif all(k in os.environ for k in tenant_auth_variables): 57 # Tenant (service principal) authentication 58 self.credentials = ServicePrincipalCredentials( 59 client_id=os.environ['AZURE_CLIENT_ID'], 60 secret=os.environ['AZURE_CLIENT_SECRET'], 61 tenant=os.environ['AZURE_TENANT_ID'] 62 ) 63 self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID'] 64 self.tenant_id = os.environ['AZURE_TENANT_ID'] 65 self.log.info("Creating session with Service Principal Authentication") 66 67 else: 68 # Azure CLI authentication 69 (self.credentials, 70 self.subscription_id, 71 self.tenant_id) = Profile().get_login_credentials( 72 resource=AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id) 73 self.log.info("Creating session with Azure CLI Authentication") 74 75 # Let provided id parameter override everything else 76 if subscription_id is not None: 77 self.subscription_id = subscription_id 78 79 self.log.info("Session using Subscription ID: %s" % self.subscription_id) 80 81 if self.credentials is None: 82 self.log.error('Unable to locate credentials for Azure session.') 83 84 def client(self, client): 85 service_name, client_name = client.rsplit('.', 1) 86 svc_module = importlib.import_module(service_name) 87 klass = getattr(svc_module, client_name) 88 return klass(self.credentials, self.subscription_id) 89 90 def resource_api_version(self, resource_id): 91 """ latest non-preview api version for resource """ 92 93 namespace = ResourceIdParser.get_namespace(resource_id) 94 resource_type = ResourceIdParser.get_resource_type(resource_id) 95 96 if resource_type in self._provider_cache: 97 return self._provider_cache[resource_type] 98 99 resource_client = self.client('azure.mgmt.resource.ResourceManagementClient') 100 provider = resource_client.providers.get(namespace) 101 102 rt = next((t for t in provider.resource_types 103 if t.resource_type == str(resource_type).split('/')[-1]), None) 104 if rt and rt.api_versions: 105 versions = [v for v in rt.api_versions if 'preview' not in v.lower()] 106 api_version = versions[0] if versions else rt.api_versions[0] 107 self._provider_cache[resource_type] = api_version 108 return api_version 109 [end of tools/c7n_azure/c7n_azure/session.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/c7n_azure/c7n_azure/session.py b/tools/c7n_azure/c7n_azure/session.py --- a/tools/c7n_azure/c7n_azure/session.py +++ b/tools/c7n_azure/c7n_azure/session.py @@ -24,6 +24,19 @@ class Session(object): def __init__(self, subscription_id=None): + """ + :param subscription_id: If provided overrides environment variables. + + """ + + self.log = logging.getLogger('custodian.azure.session') + self._provider_cache = {} + self.subscription_id_override = subscription_id + self.credentials = None + self.subscription_id = None + self.tenant_id = None + + def _initialize_session(self): """ Creates a session using available authentication type. @@ -32,11 +45,11 @@ 2. Tenant Auth 3. Azure CLI Auth - :param subscription_id: If provided, overrides environment variables. """ - self.log = logging.getLogger('custodian.azure.session') - self._provider_cache = {} + # Only run once + if self.credentials is not None: + return tenant_auth_variables = [ 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID', @@ -73,8 +86,8 @@ self.log.info("Creating session with Azure CLI Authentication") # Let provided id parameter override everything else - if subscription_id is not None: - self.subscription_id = subscription_id + if self.subscription_id_override is not None: + self.subscription_id = self.subscription_id_override self.log.info("Session using Subscription ID: %s" % self.subscription_id) @@ -82,6 +95,7 @@ self.log.error('Unable to locate credentials for Azure session.') def client(self, client): + self._initialize_session() service_name, client_name = client.rsplit('.', 1) svc_module = importlib.import_module(service_name) klass = getattr(svc_module, client_name)
{"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/session.py b/tools/c7n_azure/c7n_azure/session.py\n--- a/tools/c7n_azure/c7n_azure/session.py\n+++ b/tools/c7n_azure/c7n_azure/session.py\n@@ -24,6 +24,19 @@\n class Session(object):\n \n def __init__(self, subscription_id=None):\n+ \"\"\"\n+ :param subscription_id: If provided overrides environment variables.\n+\n+ \"\"\"\n+\n+ self.log = logging.getLogger('custodian.azure.session')\n+ self._provider_cache = {}\n+ self.subscription_id_override = subscription_id\n+ self.credentials = None\n+ self.subscription_id = None\n+ self.tenant_id = None\n+\n+ def _initialize_session(self):\n \"\"\"\n Creates a session using available authentication type.\n \n@@ -32,11 +45,11 @@\n 2. Tenant Auth\n 3. Azure CLI Auth\n \n- :param subscription_id: If provided, overrides environment variables.\n \"\"\"\n \n- self.log = logging.getLogger('custodian.azure.session')\n- self._provider_cache = {}\n+ # Only run once\n+ if self.credentials is not None:\n+ return\n \n tenant_auth_variables = [\n 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',\n@@ -73,8 +86,8 @@\n self.log.info(\"Creating session with Azure CLI Authentication\")\n \n # Let provided id parameter override everything else\n- if subscription_id is not None:\n- self.subscription_id = subscription_id\n+ if self.subscription_id_override is not None:\n+ self.subscription_id = self.subscription_id_override\n \n self.log.info(\"Session using Subscription ID: %s\" % self.subscription_id)\n \n@@ -82,6 +95,7 @@\n self.log.error('Unable to locate credentials for Azure session.')\n \n def client(self, client):\n+ self._initialize_session()\n service_name, client_name = client.rsplit('.', 1)\n svc_module = importlib.import_module(service_name)\n klass = getattr(svc_module, client_name)\n", "issue": "azure - lower az log verbosity when running custodian commands without -v\ncurrently, c7n_azure will log info about the session it's using with normal c7n commands and is much more verbose than what it used to be, moved the log level from info to debug so it's still accessible with the -v flag\r\n\r\n```\r\n$ custodian schema network-addr.filters.shield-enabled -v\r\n2018-06-19 09:42:36,028: cli.azure.cli.core:DEBUG Current cloud config:\r\nAzureCloud\r\n2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Creating session with Azure CLI Authentication\r\n2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Session using Subscription ID: xxxxxxxxxxxxxxxxxxxxxxxxxxx\r\n\r\nHelp\r\n----\r\n\r\nThe most base type\r\n\r\nSchema\r\n------\r\n\r\n{\r\n \"additionalProperties\": false,\r\n \"required\": [\r\n \"type\"\r\n ],\r\n \"type\": \"object\",\r\n \"properties\": {\r\n \"state\": {\r\n \"type\": \"boolean\"\r\n },\r\n \"type\": {\r\n \"enum\": [\r\n \"shield-enabled\"\r\n ]\r\n }\r\n }\r\n}\r\n```\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport os\nimport logging\nfrom azure.cli.core.cloud import 
AZURE_PUBLIC_CLOUD\nfrom azure.cli.core._profile import Profile\nfrom azure.common.credentials import ServicePrincipalCredentials, BasicTokenAuthentication\nfrom c7n_azure.utils import ResourceIdParser\n\n\nclass Session(object):\n\n def __init__(self, subscription_id=None):\n \"\"\"\n Creates a session using available authentication type.\n\n Auth priority:\n 1. Token Auth\n 2. Tenant Auth\n 3. Azure CLI Auth\n\n :param subscription_id: If provided, overrides environment variables.\n \"\"\"\n\n self.log = logging.getLogger('custodian.azure.session')\n self._provider_cache = {}\n\n tenant_auth_variables = [\n 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',\n 'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET'\n ]\n token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID']\n\n if all(k in os.environ for k in token_auth_variables):\n # Token authentication\n self.credentials = BasicTokenAuthentication(\n token={\n 'access_token': os.environ['AZURE_ACCESS_TOKEN']\n })\n self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']\n self.log.info(\"Creating session with Token Authentication\")\n\n elif all(k in os.environ for k in tenant_auth_variables):\n # Tenant (service principal) authentication\n self.credentials = ServicePrincipalCredentials(\n client_id=os.environ['AZURE_CLIENT_ID'],\n secret=os.environ['AZURE_CLIENT_SECRET'],\n tenant=os.environ['AZURE_TENANT_ID']\n )\n self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']\n self.tenant_id = os.environ['AZURE_TENANT_ID']\n self.log.info(\"Creating session with Service Principal Authentication\")\n\n else:\n # Azure CLI authentication\n (self.credentials,\n self.subscription_id,\n self.tenant_id) = Profile().get_login_credentials(\n resource=AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id)\n self.log.info(\"Creating session with Azure CLI Authentication\")\n\n # Let provided id parameter override everything else\n if subscription_id is not None:\n self.subscription_id = subscription_id\n\n self.log.info(\"Session using Subscription ID: %s\" % self.subscription_id)\n\n if self.credentials is None:\n self.log.error('Unable to locate credentials for Azure session.')\n\n def client(self, client):\n service_name, client_name = client.rsplit('.', 1)\n svc_module = importlib.import_module(service_name)\n klass = getattr(svc_module, client_name)\n return klass(self.credentials, self.subscription_id)\n\n def resource_api_version(self, resource_id):\n \"\"\" latest non-preview api version for resource \"\"\"\n\n namespace = ResourceIdParser.get_namespace(resource_id)\n resource_type = ResourceIdParser.get_resource_type(resource_id)\n\n if resource_type in self._provider_cache:\n return self._provider_cache[resource_type]\n\n resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')\n provider = resource_client.providers.get(namespace)\n\n rt = next((t for t in provider.resource_types\n if t.resource_type == str(resource_type).split('/')[-1]), None)\n if rt and rt.api_versions:\n versions = [v for v in rt.api_versions if 'preview' not in v.lower()]\n api_version = versions[0] if versions else rt.api_versions[0]\n self._provider_cache[resource_type] = api_version\n return api_version\n", "path": "tools/c7n_azure/c7n_azure/session.py"}]}
1,934
470
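The golden diff quiets commands such as `custodian schema` by deferring authentication until a client is actually requested, so nothing is logged at import time. A stripped-down sketch of that lazy-initialization shape, with the credential resolution itself elided:

```python
import logging


class Session:
    def __init__(self, subscription_id=None):
        self.log = logging.getLogger("custodian.azure.session")
        self.subscription_id_override = subscription_id
        self.credentials = None
        self.subscription_id = None

    def _initialize_session(self):
        if self.credentials is not None:
            return  # authenticate only once, and only when needed
        # ... token / service-principal / Azure CLI resolution goes here ...
        if self.subscription_id_override is not None:
            self.subscription_id = self.subscription_id_override
        self.log.info("Session using Subscription ID: %s", self.subscription_id)

    def client(self, client_path):
        self._initialize_session()
        # ... import the SDK module named by client_path and build the client ...
```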
gh_patches_debug_27090
rasdani/github-patches
git_diff
data-for-change__anyway-720
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Clusters view still very heavy I uploaded the code to a new server, following #463. The basic markers view is better and faster, but the clusters is still heavy and might cause server failure (I think the query is hogging the DB). </issue> <code> [start of clusters_calculator.py] 1 from models import Marker 2 from static.pymapcluster import calculate_clusters 3 import logging 4 import concurrent.futures 5 import multiprocessing 6 7 8 def retrieve_clusters(**kwargs): 9 marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng']) 10 result_futures = [] 11 logging.info('number of cores: ' + str(multiprocessing.cpu_count())) 12 with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor: 13 for marker_box in marker_boxes: 14 15 kwargs.update(marker_box) 16 markers_in_box = Marker.bounding_box_query(**kwargs).markers.all() 17 result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom'])) 18 19 completed_futures = concurrent.futures.wait(result_futures) 20 result = [] 21 for future in completed_futures.done: 22 result.extend(future.result()) 23 24 return result 25 26 27 def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng): 28 cpu_count = multiprocessing.cpu_count() 29 lat_box_size = (ne_lat - sw_lat) / cpu_count 30 # lng_box_size = (sw_lng - ne_lng) / cpu_count 31 boxes = [] 32 for i in xrange(cpu_count): 33 # TODO: the below calculation is using sw_lat as first param instead of ne_lat. Plz verify my fix for that: 34 # boxes.append((sw_lat + (i + 1) * lat_box_size, ne_lng, sw_lat + i * lat_box_size, sw_lng)) 35 boxes.append({'ne_lat': ne_lat + (i + 1) * lat_box_size, 'ne_lng': ne_lng, 36 'sw_lat': sw_lat + i * lat_box_size, 'sw_lng': sw_lng}) 37 38 return boxes 39 [end of clusters_calculator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/clusters_calculator.py b/clusters_calculator.py --- a/clusters_calculator.py +++ b/clusters_calculator.py @@ -1,27 +1,25 @@ +import itertools +from celery import Celery, group from models import Marker from static.pymapcluster import calculate_clusters -import logging -import concurrent.futures import multiprocessing -def retrieve_clusters(**kwargs): - marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng']) - result_futures = [] - logging.info('number of cores: ' + str(multiprocessing.cpu_count())) - with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor: - for marker_box in marker_boxes: +celery_app = Celery('tasks', backend='rpc://', broker='pyamqp://guest@localhost//') - kwargs.update(marker_box) - markers_in_box = Marker.bounding_box_query(**kwargs).markers.all() - result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom'])) +@celery_app.task +def calculate_marker_box(kwargs, marker_box): + kwargs.update(marker_box) + markers_in_box = Marker.bounding_box_query(**kwargs).markers.all() + return calculate_clusters(markers_in_box, kwargs['zoom']) - completed_futures = concurrent.futures.wait(result_futures) - result = [] - for future in completed_futures.done: - result.extend(future.result()) - return result +def retrieve_clusters(**kwargs): + marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng']) + job = group([calculate_marker_box.s(kwargs, marker_box) for marker_box in marker_boxes]) + result = job.apply_async() + result.join() + return list(itertools.chain.from_iterable(result.get())) def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):
{"golden_diff": "diff --git a/clusters_calculator.py b/clusters_calculator.py\n--- a/clusters_calculator.py\n+++ b/clusters_calculator.py\n@@ -1,27 +1,25 @@\n+import itertools\n+from celery import Celery, group\n from models import Marker\n from static.pymapcluster import calculate_clusters\n-import logging\n-import concurrent.futures\n import multiprocessing\n \n \n-def retrieve_clusters(**kwargs):\n- marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n- result_futures = []\n- logging.info('number of cores: ' + str(multiprocessing.cpu_count()))\n- with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:\n- for marker_box in marker_boxes:\n+celery_app = Celery('tasks', backend='rpc://', broker='pyamqp://guest@localhost//')\n \n- kwargs.update(marker_box)\n- markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n- result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))\n+@celery_app.task\n+def calculate_marker_box(kwargs, marker_box):\n+ kwargs.update(marker_box)\n+ markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n+ return calculate_clusters(markers_in_box, kwargs['zoom'])\n \n- completed_futures = concurrent.futures.wait(result_futures)\n- result = []\n- for future in completed_futures.done:\n- result.extend(future.result())\n \n- return result\n+def retrieve_clusters(**kwargs):\n+ marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n+ job = group([calculate_marker_box.s(kwargs, marker_box) for marker_box in marker_boxes])\n+ result = job.apply_async()\n+ result.join()\n+ return list(itertools.chain.from_iterable(result.get()))\n \n \n def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):\n", "issue": "Clusters view still very heavy\nI uploaded the code to a new server, following #463.\r\nThe basic markers view is better and faster, but the clusters is still heavy and might cause server failure (I think the query is hogging the DB).\n", "before_files": [{"content": "from models import Marker\nfrom static.pymapcluster import calculate_clusters\nimport logging\nimport concurrent.futures\nimport multiprocessing\n\n\ndef retrieve_clusters(**kwargs):\n marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n result_futures = []\n logging.info('number of cores: ' + str(multiprocessing.cpu_count()))\n with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:\n for marker_box in marker_boxes:\n\n kwargs.update(marker_box)\n markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))\n\n completed_futures = concurrent.futures.wait(result_futures)\n result = []\n for future in completed_futures.done:\n result.extend(future.result())\n\n return result\n\n\ndef divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):\n cpu_count = multiprocessing.cpu_count()\n lat_box_size = (ne_lat - sw_lat) / cpu_count\n # lng_box_size = (sw_lng - ne_lng) / cpu_count\n boxes = []\n for i in xrange(cpu_count):\n # TODO: the below calculation is using sw_lat as first param instead of ne_lat. 
Plz verify my fix for that:\n # boxes.append((sw_lat + (i + 1) * lat_box_size, ne_lng, sw_lat + i * lat_box_size, sw_lng))\n boxes.append({'ne_lat': ne_lat + (i + 1) * lat_box_size, 'ne_lng': ne_lng,\n 'sw_lat': sw_lat + i * lat_box_size, 'sw_lng': sw_lng})\n\n return boxes\n", "path": "clusters_calculator.py"}]}
1,034
444
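The accepted fix moves the per-box clustering out of the web process and into Celery workers. A reduced sketch of that fan-out and join pattern, with the marker query stubbed out and the broker URL copied from the diff; a reachable local RabbitMQ is an assumption:

```python
import itertools

from celery import Celery, group

celery_app = Celery("tasks", backend="rpc://", broker="pyamqp://guest@localhost//")


@celery_app.task
def cluster_one_box(box, zoom):
    # Query the markers inside `box` and cluster them; stubbed for the sketch.
    return []


def retrieve_clusters(boxes, zoom):
    job = group([cluster_one_box.s(box, zoom) for box in boxes])
    result = job.apply_async()
    result.join()
    return list(itertools.chain.from_iterable(result.get()))
```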
gh_patches_debug_4555
rasdani/github-patches
git_diff
kubeflow__pipelines-9157
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [sdk] Containerized Python Component module not found error There is a bug when building a containerized Python component that happens (at least) in the case when the longest path of the import graph ending at the component involves >2 modules. ### Environment KFP SDK 2.0.0-beta.6 ### Steps to reproduce For example: ```python # component.py from module_one import one from kfp import dsl @dsl.component def comp(): ... ``` ```python # module_one.py from module_two import two one = 1 ``` ```python # module_two.py two = 2 ``` Then: `kfp component build .` You get a `No module named` error. ### Expected result Should build without an error. ### Materials and Reference Related: https://github.com/kubeflow/pipelines/issues/8353 </issue> <code> [start of sdk/python/kfp/components/utils.py] 1 # Copyright 2021 The Kubeflow Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Definitions of utils methods.""" 15 16 import importlib 17 import os 18 import re 19 import sys 20 import types 21 from typing import List 22 23 _COMPONENT_NAME_PREFIX = 'comp-' 24 _EXECUTOR_LABEL_PREFIX = 'exec-' 25 26 27 def load_module(module_name: str, module_directory: str) -> types.ModuleType: 28 """Dynamically imports the Python module with the given name and package 29 path. 30 31 E.g., Assuming there is a file called `my_module.py` under 32 `/some/directory/my_module`, we can use:: 33 34 load_module('my_module', '/some/directory') 35 36 to effectively `import mymodule`. 37 38 Args: 39 module_name: The name of the module. 40 package_path: The package under which the specified module resides. 41 """ 42 module_spec = importlib.util.spec_from_file_location( 43 name=module_name, 44 location=os.path.join(module_directory, f'{module_name}.py')) 45 module = importlib.util.module_from_spec(module_spec) 46 sys.modules[module_spec.name] = module 47 module_spec.loader.exec_module(module) 48 return module 49 50 51 def maybe_rename_for_k8s(name: str) -> str: 52 """Cleans and converts a name to be k8s compatible. 53 54 Args: 55 name: The original name. 56 57 Returns: 58 A sanitized name. 
59 """ 60 return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-', 61 name.lower())).lstrip('-').rstrip('-') 62 63 64 def sanitize_input_name(name: str) -> str: 65 """Sanitizes input name.""" 66 return re.sub('[^_0-9a-z]+', '_', name.lower()).lstrip('_').rstrip('_') 67 68 69 def sanitize_component_name(name: str) -> str: 70 """Sanitizes component name.""" 71 return _COMPONENT_NAME_PREFIX + maybe_rename_for_k8s(name) 72 73 74 def sanitize_task_name(name: str) -> str: 75 """Sanitizes task name.""" 76 return maybe_rename_for_k8s(name) 77 78 79 def sanitize_executor_label(label: str) -> str: 80 """Sanitizes executor label.""" 81 return _EXECUTOR_LABEL_PREFIX + maybe_rename_for_k8s(label) 82 83 84 def make_name_unique_by_adding_index( 85 name: str, 86 collection: List[str], 87 delimiter: str, 88 ) -> str: 89 """Makes a unique name by adding index. 90 91 The index starts from 2 and increase by 1 until we find a unique name. 92 93 Args: 94 name: The original name. 95 collection: The collection of existing names. 96 delimiter: The delimiter to connect the original name and an index. 97 98 Returns: 99 A unique name composed of name+delimiter+next index 100 """ 101 unique_name = name 102 if unique_name in collection: 103 for i in range(2, sys.maxsize**10): 104 unique_name = name + delimiter + str(i) 105 if unique_name not in collection: 106 break 107 return unique_name 108 109 110 def validate_pipeline_name(name: str) -> None: 111 """Validate pipeline name. 112 113 A valid pipeline name should match ^[a-z0-9][a-z0-9-]{0,127}$. 114 115 Args: 116 name: The pipeline name. 117 118 Raises: 119 ValueError if the pipeline name doesn't conform to the regular expression. 120 """ 121 pattern = re.compile(r'^[a-z0-9][a-z0-9-]{0,127}$') 122 if not pattern.match(name): 123 raise ValueError( 124 'Invalid pipeline name: %s.\n' 125 'Please specify a pipeline name that matches the regular ' 126 'expression "^[a-z0-9][a-z0-9-]{0,127}$" using ' 127 '`dsl.pipeline(name=...)` decorator.' % name) 128 [end of sdk/python/kfp/components/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/kfp/components/utils.py b/sdk/python/kfp/components/utils.py --- a/sdk/python/kfp/components/utils.py +++ b/sdk/python/kfp/components/utils.py @@ -44,6 +44,7 @@ location=os.path.join(module_directory, f'{module_name}.py')) module = importlib.util.module_from_spec(module_spec) sys.modules[module_spec.name] = module + sys.path.insert(0, str(module_directory)) module_spec.loader.exec_module(module) return module
{"golden_diff": "diff --git a/sdk/python/kfp/components/utils.py b/sdk/python/kfp/components/utils.py\n--- a/sdk/python/kfp/components/utils.py\n+++ b/sdk/python/kfp/components/utils.py\n@@ -44,6 +44,7 @@\n location=os.path.join(module_directory, f'{module_name}.py'))\n module = importlib.util.module_from_spec(module_spec)\n sys.modules[module_spec.name] = module\n+ sys.path.insert(0, str(module_directory))\n module_spec.loader.exec_module(module)\n return module\n", "issue": "[sdk] Containerized Python Component module not found error \nThere is a bug when building a containerized Python component that happens (at least) in the case when the longest path of the import graph ending at the component involves >2 modules. \r\n\r\n### Environment\r\nKFP SDK 2.0.0-beta.6\r\n\r\n### Steps to reproduce\r\nFor example:\r\n\r\n```python\r\n# component.py\r\nfrom module_one import one\r\nfrom kfp import dsl\r\n\r\[email protected]\r\ndef comp(): ...\r\n```\r\n\r\n```python\r\n# module_one.py\r\nfrom module_two import two\r\none = 1\r\n```\r\n\r\n```python\r\n# module_two.py\r\ntwo = 2\r\n```\r\n\r\nThen: `kfp component build .`\r\n\r\nYou get a `No module named` error.\r\n\r\n### Expected result\r\n\r\nShould build without an error.\r\n\r\n### Materials and Reference\r\nRelated: https://github.com/kubeflow/pipelines/issues/8353\n", "before_files": [{"content": "# Copyright 2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Definitions of utils methods.\"\"\"\n\nimport importlib\nimport os\nimport re\nimport sys\nimport types\nfrom typing import List\n\n_COMPONENT_NAME_PREFIX = 'comp-'\n_EXECUTOR_LABEL_PREFIX = 'exec-'\n\n\ndef load_module(module_name: str, module_directory: str) -> types.ModuleType:\n \"\"\"Dynamically imports the Python module with the given name and package\n path.\n\n E.g., Assuming there is a file called `my_module.py` under\n `/some/directory/my_module`, we can use::\n\n load_module('my_module', '/some/directory')\n\n to effectively `import mymodule`.\n\n Args:\n module_name: The name of the module.\n package_path: The package under which the specified module resides.\n \"\"\"\n module_spec = importlib.util.spec_from_file_location(\n name=module_name,\n location=os.path.join(module_directory, f'{module_name}.py'))\n module = importlib.util.module_from_spec(module_spec)\n sys.modules[module_spec.name] = module\n module_spec.loader.exec_module(module)\n return module\n\n\ndef maybe_rename_for_k8s(name: str) -> str:\n \"\"\"Cleans and converts a name to be k8s compatible.\n\n Args:\n name: The original name.\n\n Returns:\n A sanitized name.\n \"\"\"\n return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-',\n name.lower())).lstrip('-').rstrip('-')\n\n\ndef sanitize_input_name(name: str) -> str:\n \"\"\"Sanitizes input name.\"\"\"\n return re.sub('[^_0-9a-z]+', '_', name.lower()).lstrip('_').rstrip('_')\n\n\ndef sanitize_component_name(name: str) -> str:\n \"\"\"Sanitizes component name.\"\"\"\n return _COMPONENT_NAME_PREFIX + 
maybe_rename_for_k8s(name)\n\n\ndef sanitize_task_name(name: str) -> str:\n \"\"\"Sanitizes task name.\"\"\"\n return maybe_rename_for_k8s(name)\n\n\ndef sanitize_executor_label(label: str) -> str:\n \"\"\"Sanitizes executor label.\"\"\"\n return _EXECUTOR_LABEL_PREFIX + maybe_rename_for_k8s(label)\n\n\ndef make_name_unique_by_adding_index(\n name: str,\n collection: List[str],\n delimiter: str,\n) -> str:\n \"\"\"Makes a unique name by adding index.\n\n The index starts from 2 and increase by 1 until we find a unique name.\n\n Args:\n name: The original name.\n collection: The collection of existing names.\n delimiter: The delimiter to connect the original name and an index.\n\n Returns:\n A unique name composed of name+delimiter+next index\n \"\"\"\n unique_name = name\n if unique_name in collection:\n for i in range(2, sys.maxsize**10):\n unique_name = name + delimiter + str(i)\n if unique_name not in collection:\n break\n return unique_name\n\n\ndef validate_pipeline_name(name: str) -> None:\n \"\"\"Validate pipeline name.\n\n A valid pipeline name should match ^[a-z0-9][a-z0-9-]{0,127}$.\n\n Args:\n name: The pipeline name.\n\n Raises:\n ValueError if the pipeline name doesn't conform to the regular expression.\n \"\"\"\n pattern = re.compile(r'^[a-z0-9][a-z0-9-]{0,127}$')\n if not pattern.match(name):\n raise ValueError(\n 'Invalid pipeline name: %s.\\n'\n 'Please specify a pipeline name that matches the regular '\n 'expression \"^[a-z0-9][a-z0-9-]{0,127}$\" using '\n '`dsl.pipeline(name=...)` decorator.' % name)\n", "path": "sdk/python/kfp/components/utils.py"}]}
1,966
115
gh_patches_debug_32958
rasdani/github-patches
git_diff
DataDog__dd-trace-py-498
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tornado Tracer configuration doesn't have access to settings object for Trace Filtering </issue> <code> [start of ddtrace/contrib/tornado/__init__.py] 1 """ 2 The Tornado integration traces all ``RequestHandler`` defined in a Tornado web application. 3 Auto instrumentation is available using the ``patch`` function that **must be called before** 4 importing the tornado library. The following is an example:: 5 6 # patch before importing tornado and concurrent.futures 7 from ddtrace import tracer, patch 8 patch(tornado=True) 9 10 import tornado.web 11 import tornado.gen 12 import tornado.ioloop 13 14 # create your handlers 15 class MainHandler(tornado.web.RequestHandler): 16 @tornado.gen.coroutine 17 def get(self): 18 self.write("Hello, world") 19 20 # create your application 21 app = tornado.web.Application([ 22 (r'/', MainHandler), 23 ]) 24 25 # and run it as usual 26 app.listen(8888) 27 tornado.ioloop.IOLoop.current().start() 28 29 When any type of ``RequestHandler`` is hit, a request root span is automatically created. If 30 you want to trace more parts of your application, you can use the ``wrap()`` decorator and 31 the ``trace()`` method as usual:: 32 33 class MainHandler(tornado.web.RequestHandler): 34 @tornado.gen.coroutine 35 def get(self): 36 yield self.notify() 37 yield self.blocking_method() 38 with tracer.trace('tornado.before_write') as span: 39 # trace more work in the handler 40 41 @tracer.wrap('tornado.executor_handler') 42 @tornado.concurrent.run_on_executor 43 def blocking_method(self): 44 # do something expensive 45 46 @tracer.wrap('tornado.notify', service='tornado-notification') 47 @tornado.gen.coroutine 48 def notify(self): 49 # do something 50 51 Tornado settings can be used to change some tracing configuration, like:: 52 53 settings = { 54 'datadog_trace': { 55 'default_service': 'my-tornado-app', 56 'tags': {'env': 'production'}, 57 'distributed_tracing': True, 58 }, 59 } 60 61 app = tornado.web.Application([ 62 (r'/', MainHandler), 63 ], **settings) 64 65 The available settings are: 66 67 * ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually 68 this configuration must be updated with a meaningful name. 69 * ``tags`` (default: `{}`): set global tags that should be applied to all spans. 70 * ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the 71 code is still instrumented but no spans are sent to the APM agent. 72 * ``distributed_tracing`` (default: `False`): enable distributed tracing if this is called 73 remotely from an instrumented application. 74 We suggest to enable it only for internal services where headers are under your control. 75 * ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent. 76 * ``agent_port`` (default: `8126`): define the port of the APM agent. 
77 """ 78 from ...utils.importlib import require_modules 79 80 81 required_modules = ['tornado'] 82 83 with require_modules(required_modules) as missing_modules: 84 if not missing_modules: 85 from .stack_context import run_with_trace_context, TracerStackContext 86 87 context_provider = TracerStackContext() 88 89 from .patch import patch, unpatch 90 91 __all__ = [ 92 'patch', 93 'unpatch', 94 'context_provider', 95 'run_with_trace_context', 96 'TracerStackContext', 97 ] 98 [end of ddtrace/contrib/tornado/__init__.py] [start of ddtrace/contrib/tornado/application.py] 1 import ddtrace 2 3 from tornado import template 4 5 from . import decorators, context_provider 6 from .constants import CONFIG_KEY 7 8 from ...ext import AppTypes 9 10 11 def tracer_config(__init__, app, args, kwargs): 12 """ 13 Wrap Tornado web application so that we can configure services info and 14 tracing settings after the initialization. 15 """ 16 # call the Application constructor 17 __init__(*args, **kwargs) 18 19 # default settings 20 settings = { 21 'tracer': ddtrace.tracer, 22 'default_service': 'tornado-web', 23 'distributed_tracing': False, 24 } 25 26 # update defaults with users settings 27 user_settings = app.settings.get(CONFIG_KEY) 28 if user_settings: 29 settings.update(user_settings) 30 31 app.settings[CONFIG_KEY] = settings 32 tracer = settings['tracer'] 33 service = settings['default_service'] 34 35 # the tracer must use the right Context propagation and wrap executor; 36 # this action is done twice because the patch() method uses the 37 # global tracer while here we can have a different instance (even if 38 # this is not usual). 39 tracer.configure( 40 context_provider=context_provider, 41 wrap_executor=decorators.wrap_executor, 42 enabled=settings.get('enabled', None), 43 hostname=settings.get('agent_hostname', None), 44 port=settings.get('agent_port', None), 45 ) 46 47 # set global tags if any 48 tags = settings.get('tags', None) 49 if tags: 50 tracer.set_tags(tags) 51 52 # configure the current service 53 tracer.set_service_info( 54 service=service, 55 app='tornado', 56 app_type=AppTypes.web, 57 ) 58 59 # configure the PIN object for template rendering 60 ddtrace.Pin(app='tornado', service=service, app_type='web', tracer=tracer).onto(template) 61 [end of ddtrace/contrib/tornado/application.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/contrib/tornado/__init__.py b/ddtrace/contrib/tornado/__init__.py --- a/ddtrace/contrib/tornado/__init__.py +++ b/ddtrace/contrib/tornado/__init__.py @@ -55,6 +55,11 @@ 'default_service': 'my-tornado-app', 'tags': {'env': 'production'}, 'distributed_tracing': True, + 'settings': { + 'FILTERS': [ + FilterRequestsOnUrl(r'http://test\.example\.com'), + ], + }, }, } @@ -74,6 +79,7 @@ We suggest to enable it only for internal services where headers are under your control. * ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent. * ``agent_port`` (default: `8126`): define the port of the APM agent. +* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior. """ from ...utils.importlib import require_modules diff --git a/ddtrace/contrib/tornado/application.py b/ddtrace/contrib/tornado/application.py --- a/ddtrace/contrib/tornado/application.py +++ b/ddtrace/contrib/tornado/application.py @@ -32,6 +32,9 @@ tracer = settings['tracer'] service = settings['default_service'] + # extract extra settings + extra_settings = settings.get('settings', {}) + # the tracer must use the right Context propagation and wrap executor; # this action is done twice because the patch() method uses the # global tracer while here we can have a different instance (even if @@ -42,6 +45,7 @@ enabled=settings.get('enabled', None), hostname=settings.get('agent_hostname', None), port=settings.get('agent_port', None), + settings=extra_settings, ) # set global tags if any
{"golden_diff": "diff --git a/ddtrace/contrib/tornado/__init__.py b/ddtrace/contrib/tornado/__init__.py\n--- a/ddtrace/contrib/tornado/__init__.py\n+++ b/ddtrace/contrib/tornado/__init__.py\n@@ -55,6 +55,11 @@\n 'default_service': 'my-tornado-app',\n 'tags': {'env': 'production'},\n 'distributed_tracing': True,\n+ 'settings': {\n+ 'FILTERS': [\n+ FilterRequestsOnUrl(r'http://test\\.example\\.com'),\n+ ],\n+ },\n },\n }\n \n@@ -74,6 +79,7 @@\n We suggest to enable it only for internal services where headers are under your control.\n * ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.\n * ``agent_port`` (default: `8126`): define the port of the APM agent.\n+* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior.\n \"\"\"\n from ...utils.importlib import require_modules\n \ndiff --git a/ddtrace/contrib/tornado/application.py b/ddtrace/contrib/tornado/application.py\n--- a/ddtrace/contrib/tornado/application.py\n+++ b/ddtrace/contrib/tornado/application.py\n@@ -32,6 +32,9 @@\n tracer = settings['tracer']\n service = settings['default_service']\n \n+ # extract extra settings\n+ extra_settings = settings.get('settings', {})\n+\n # the tracer must use the right Context propagation and wrap executor;\n # this action is done twice because the patch() method uses the\n # global tracer while here we can have a different instance (even if\n@@ -42,6 +45,7 @@\n enabled=settings.get('enabled', None),\n hostname=settings.get('agent_hostname', None),\n port=settings.get('agent_port', None),\n+ settings=extra_settings,\n )\n \n # set global tags if any\n", "issue": "Tornado Tracer configuration doesn't have access to settings object for Trace Filtering\n\n", "before_files": [{"content": "\"\"\"\nThe Tornado integration traces all ``RequestHandler`` defined in a Tornado web application.\nAuto instrumentation is available using the ``patch`` function that **must be called before**\nimporting the tornado library. The following is an example::\n\n # patch before importing tornado and concurrent.futures\n from ddtrace import tracer, patch\n patch(tornado=True)\n\n import tornado.web\n import tornado.gen\n import tornado.ioloop\n\n # create your handlers\n class MainHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n self.write(\"Hello, world\")\n\n # create your application\n app = tornado.web.Application([\n (r'/', MainHandler),\n ])\n\n # and run it as usual\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n\nWhen any type of ``RequestHandler`` is hit, a request root span is automatically created. 
If\nyou want to trace more parts of your application, you can use the ``wrap()`` decorator and\nthe ``trace()`` method as usual::\n\n class MainHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n yield self.notify()\n yield self.blocking_method()\n with tracer.trace('tornado.before_write') as span:\n # trace more work in the handler\n\n @tracer.wrap('tornado.executor_handler')\n @tornado.concurrent.run_on_executor\n def blocking_method(self):\n # do something expensive\n\n @tracer.wrap('tornado.notify', service='tornado-notification')\n @tornado.gen.coroutine\n def notify(self):\n # do something\n\nTornado settings can be used to change some tracing configuration, like::\n\n settings = {\n 'datadog_trace': {\n 'default_service': 'my-tornado-app',\n 'tags': {'env': 'production'},\n 'distributed_tracing': True,\n },\n }\n\n app = tornado.web.Application([\n (r'/', MainHandler),\n ], **settings)\n\nThe available settings are:\n\n* ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually\n this configuration must be updated with a meaningful name.\n* ``tags`` (default: `{}`): set global tags that should be applied to all spans.\n* ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the\n code is still instrumented but no spans are sent to the APM agent.\n* ``distributed_tracing`` (default: `False`): enable distributed tracing if this is called\n remotely from an instrumented application.\n We suggest to enable it only for internal services where headers are under your control.\n* ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.\n* ``agent_port`` (default: `8126`): define the port of the APM agent.\n\"\"\"\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = ['tornado']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .stack_context import run_with_trace_context, TracerStackContext\n\n context_provider = TracerStackContext()\n\n from .patch import patch, unpatch\n\n __all__ = [\n 'patch',\n 'unpatch',\n 'context_provider',\n 'run_with_trace_context',\n 'TracerStackContext',\n ]\n", "path": "ddtrace/contrib/tornado/__init__.py"}, {"content": "import ddtrace\n\nfrom tornado import template\n\nfrom . 
import decorators, context_provider\nfrom .constants import CONFIG_KEY\n\nfrom ...ext import AppTypes\n\n\ndef tracer_config(__init__, app, args, kwargs):\n \"\"\"\n Wrap Tornado web application so that we can configure services info and\n tracing settings after the initialization.\n \"\"\"\n # call the Application constructor\n __init__(*args, **kwargs)\n\n # default settings\n settings = {\n 'tracer': ddtrace.tracer,\n 'default_service': 'tornado-web',\n 'distributed_tracing': False,\n }\n\n # update defaults with users settings\n user_settings = app.settings.get(CONFIG_KEY)\n if user_settings:\n settings.update(user_settings)\n\n app.settings[CONFIG_KEY] = settings\n tracer = settings['tracer']\n service = settings['default_service']\n\n # the tracer must use the right Context propagation and wrap executor;\n # this action is done twice because the patch() method uses the\n # global tracer while here we can have a different instance (even if\n # this is not usual).\n tracer.configure(\n context_provider=context_provider,\n wrap_executor=decorators.wrap_executor,\n enabled=settings.get('enabled', None),\n hostname=settings.get('agent_hostname', None),\n port=settings.get('agent_port', None),\n )\n\n # set global tags if any\n tags = settings.get('tags', None)\n if tags:\n tracer.set_tags(tags)\n\n # configure the current service\n tracer.set_service_info(\n service=service,\n app='tornado',\n app_type=AppTypes.web,\n )\n\n # configure the PIN object for template rendering\n ddtrace.Pin(app='tornado', service=service, app_type='web', tracer=tracer).onto(template)\n", "path": "ddtrace/contrib/tornado/application.py"}]}
2,024
442
gh_patches_debug_26961
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-1888
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug]: Basingstoke and Deane is broken since the 8th of Feb ### I Have A Problem With: A specific source ### What's Your Problem The service no longer downloads the waste updates. I tried 1.46 and the master. ### Source (if relevant) _No response_ ### Logs ```Shell Logger: waste_collection_schedule.source_shell Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136 integration: waste_collection_schedule (documentation) First occurred: 11:26:41 (1 occurrences) Last logged: 11:26:41 fetch failed for source Basingstoke and Deane Borough Council: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py", line 65, in fetch date=datetime.strptime(date_str, "%A, %d %B %Y").date(), ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.12/_strptime.py", line 554, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.12/_strptime.py", line 333, in _strptime raise ValueError("time data %r does not match format %r" % ValueError: time data 'none / unknown' does not match format '%A, %d %B %Y' ``` ### Relevant Configuration ```YAML waste_collection_schedule: sources: - name: basingstoke_gov_uk args: uprn: "1000809XXXX" customize: - type: Garden show: True - type: Waste show: True - type: Recycling show: True - type: Glass show: True ``` ### Checklist Source Error - [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration) - [X] Checked that the website of your service provider is still working - [X] Tested my attributes on the service provider website (if possible) - [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version) ### Checklist Sensor Error - [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used) ### Required - [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been. - [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate. </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py] 1 from datetime import datetime 2 3 import requests 4 import urllib3 5 from bs4 import BeautifulSoup 6 from waste_collection_schedule import Collection # type: ignore[attr-defined] 7 8 # With verify=True the POST fails due to a SSLCertVerificationError. 9 # Using verify=False works, but is not ideal. 
The following links may provide a better way of dealing with this: 10 # https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings 11 # https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl 12 # These two lines areused to suppress the InsecureRequestWarning when using verify=False 13 urllib3.disable_warnings() 14 15 TITLE = "Basingstoke and Deane Borough Council" 16 DESCRIPTION = "Source for basingstoke.gov.uk services for Basingstoke and Deane Borough Council, UK." 17 URL = "https://basingstoke.gov.uk" 18 TEST_CASES = { 19 "Test_001": {"uprn": "100060234732"}, 20 "Test_002": {"uprn": "100060218986"}, 21 "Test_003": {"uprn": 100060235836}, 22 "Test_004": {"uprn": 100060224194}, 23 } 24 HEADERS = { 25 "user-agent": "Mozilla/5.0", 26 } 27 ICON_MAP = { 28 "WASTE": "mdi:trash-can", 29 "RECYCLING": "mdi:recycle", 30 "GARDEN": "mdi:leaf", 31 "GLASS": "mdi:glass-fragile", 32 } 33 34 35 class Source: 36 def __init__(self, uprn): 37 self._uprn = str(uprn) 38 39 def fetch(self): 40 REQUEST_COOKIES = { 41 "cookie_control_popup": "N", 42 "WhenAreMyBinsCollected": self._uprn, 43 } 44 r = requests.get( 45 "https://www.basingstoke.gov.uk/bincollections", 46 headers=HEADERS, 47 cookies=REQUEST_COOKIES, 48 verify=False, 49 ) 50 r.raise_for_status() 51 52 soup = BeautifulSoup(r.text, "html.parser") 53 54 services = soup.findAll("div", {"class": "service"}) 55 56 entries = [] 57 58 for service in services: 59 waste_type = service.find("h2").text.split(" ")[0] 60 schedule_dates = service.findAll("li") 61 for schedule in schedule_dates: 62 date_str = schedule.text.split("(")[0].strip() 63 entries.append( 64 Collection( 65 date=datetime.strptime(date_str, "%A, %d %B %Y").date(), 66 t=waste_type, 67 icon=ICON_MAP.get(waste_type.upper()), 68 ) 69 ) 70 71 return entries 72 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py @@ -1,3 +1,4 @@ +import logging from datetime import datetime import requests @@ -30,6 +31,7 @@ "GARDEN": "mdi:leaf", "GLASS": "mdi:glass-fragile", } +LOGGER = logging.getLogger(__name__) class Source: @@ -60,9 +62,17 @@ schedule_dates = service.findAll("li") for schedule in schedule_dates: date_str = schedule.text.split("(")[0].strip() + try: + date = datetime.strptime(date_str, "%A, %d %B %Y").date() + except ValueError as e: + LOGGER.warning( + f"Failed to parse date '{date_str}' for wastetype {waste_type}: {e}" + ) + continue + entries.append( Collection( - date=datetime.strptime(date_str, "%A, %d %B %Y").date(), + date=date, t=waste_type, icon=ICON_MAP.get(waste_type.upper()), )
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n@@ -1,3 +1,4 @@\n+import logging\n from datetime import datetime\n \n import requests\n@@ -30,6 +31,7 @@\n \"GARDEN\": \"mdi:leaf\",\n \"GLASS\": \"mdi:glass-fragile\",\n }\n+LOGGER = logging.getLogger(__name__)\n \n \n class Source:\n@@ -60,9 +62,17 @@\n schedule_dates = service.findAll(\"li\")\n for schedule in schedule_dates:\n date_str = schedule.text.split(\"(\")[0].strip()\n+ try:\n+ date = datetime.strptime(date_str, \"%A, %d %B %Y\").date()\n+ except ValueError as e:\n+ LOGGER.warning(\n+ f\"Failed to parse date '{date_str}' for wastetype {waste_type}: {e}\"\n+ )\n+ continue\n+\n entries.append(\n Collection(\n- date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(),\n+ date=date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n", "issue": "[Bug]: Basingstoke and Deane is broken since the 8th of Feb\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe service no longer downloads the waste updates. I tried 1.46 and the master.\n\n### Source (if relevant)\n\n_No response_\n\n### Logs\n\n```Shell\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nintegration: waste_collection_schedule (documentation)\r\nFirst occurred: 11:26:41 (1 occurrences)\r\nLast logged: 11:26:41\r\n\r\nfetch failed for source Basingstoke and Deane Borough Council: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\", line 65, in fetch date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(), ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 554, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 333, in _strptime raise ValueError(\"time data %r does not match format %r\" % ValueError: time data 'none / unknown' does not match format '%A, %d %B %Y'\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: basingstoke_gov_uk\r\n args:\r\n uprn: \"1000809XXXX\"\r\n customize:\r\n - type: Garden\r\n show: True\r\n - type: Waste\r\n show: True\r\n - type: Recycling\r\n show: True\r\n - type: Glass\r\n show: True\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- 
[X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nimport urllib3\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\n# With verify=True the POST fails due to a SSLCertVerificationError.\n# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:\n# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n# These two lines areused to suppress the InsecureRequestWarning when using verify=False\nurllib3.disable_warnings()\n\nTITLE = \"Basingstoke and Deane Borough Council\"\nDESCRIPTION = \"Source for basingstoke.gov.uk services for Basingstoke and Deane Borough Council, UK.\"\nURL = \"https://basingstoke.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100060234732\"},\n \"Test_002\": {\"uprn\": \"100060218986\"},\n \"Test_003\": {\"uprn\": 100060235836},\n \"Test_004\": {\"uprn\": 100060224194},\n}\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\nICON_MAP = {\n \"WASTE\": \"mdi:trash-can\",\n \"RECYCLING\": \"mdi:recycle\",\n \"GARDEN\": \"mdi:leaf\",\n \"GLASS\": \"mdi:glass-fragile\",\n}\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn)\n\n def fetch(self):\n REQUEST_COOKIES = {\n \"cookie_control_popup\": \"N\",\n \"WhenAreMyBinsCollected\": self._uprn,\n }\n r = requests.get(\n \"https://www.basingstoke.gov.uk/bincollections\",\n headers=HEADERS,\n cookies=REQUEST_COOKIES,\n verify=False,\n )\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n services = soup.findAll(\"div\", {\"class\": \"service\"})\n\n entries = []\n\n for service in services:\n waste_type = service.find(\"h2\").text.split(\" \")[0]\n schedule_dates = service.findAll(\"li\")\n for schedule in schedule_dates:\n date_str = schedule.text.split(\"(\")[0].strip()\n entries.append(\n Collection(\n date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(),\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py"}]}
2,004
325
gh_patches_debug_32666
rasdani/github-patches
git_diff
opsdroid__opsdroid-1776
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Infinite self-responses in Mattermost connector After fixing the Mattermost connector with PR #1774 it turns out it suffers from the same infinite self-response problem (#1691) as was fixed for the Gitter connector in #1692. </issue> <code> [start of opsdroid/connector/mattermost/__init__.py] 1 """A connector for Mattermost.""" 2 import logging 3 import json 4 5 from mattermostdriver import Driver, Websocket 6 from voluptuous import Required 7 8 from opsdroid.connector import Connector, register_event 9 from opsdroid.events import Message 10 11 _LOGGER = logging.getLogger(__name__) 12 CONFIG_SCHEMA = { 13 Required("token"): str, 14 Required("url"): str, 15 Required("team-name"): str, 16 "scheme": str, 17 "port": int, 18 "ssl-verify": bool, 19 "connect-timeout": int, 20 } 21 22 23 class ConnectorMattermost(Connector): 24 """A connector for Mattermost.""" 25 26 def __init__(self, config, opsdroid=None): 27 """Create the connector.""" 28 super().__init__(config, opsdroid=opsdroid) 29 _LOGGER.debug(_("Starting Mattermost connector")) 30 self.name = "mattermost" 31 self.token = config["token"] 32 self.url = config["url"] 33 self.team_name = config["team-name"] 34 self.scheme = config.get("scheme", "https") 35 self.port = config.get("port", 8065) 36 self.verify = config.get("ssl-verify", True) 37 self.timeout = config.get("connect-timeout", 30) 38 self.request_timeout = None 39 self.mfa_token = None 40 self.debug = False 41 self.listening = True 42 43 self.mm_driver = Driver( 44 { 45 "url": self.url, 46 "token": self.token, 47 "scheme": self.scheme, 48 "port": self.port, 49 "verify": self.verify, 50 "timeout": self.timeout, 51 "request_timeout": self.request_timeout, 52 "mfa_token": self.mfa_token, 53 "debug": self.debug, 54 } 55 ) 56 57 async def connect(self): 58 """Connect to the chat service.""" 59 _LOGGER.info(_("Connecting to Mattermost")) 60 61 login_response = self.mm_driver.login() 62 63 _LOGGER.info(login_response) 64 65 if "id" in login_response: 66 self.bot_id = login_response["id"] 67 if "username" in login_response: 68 self.bot_name = login_response["username"] 69 70 _LOGGER.info(_("Connected as %s"), self.bot_name) 71 72 self.mm_driver.websocket = Websocket( 73 self.mm_driver.options, self.mm_driver.client.token 74 ) 75 76 _LOGGER.info(_("Connected successfully")) 77 78 async def disconnect(self): 79 """Disconnect from Mattermost.""" 80 self.listening = False 81 self.mm_driver.logout() 82 83 async def listen(self): 84 """Listen for and parse new messages.""" 85 await self.mm_driver.websocket.connect(self.process_message) 86 87 async def process_message(self, raw_message): 88 """Process a raw message and pass it to the parser.""" 89 _LOGGER.info(raw_message) 90 91 message = json.loads(raw_message) 92 93 if "event" in message and message["event"] == "posted": 94 data = message["data"] 95 post = json.loads(data["post"]) 96 await self.opsdroid.parse( 97 Message( 98 text=post["message"], 99 user=data["sender_name"], 100 target=data["channel_name"], 101 connector=self, 102 raw_event=message, 103 ) 104 ) 105 106 @register_event(Message) 107 async def send_message(self, message): 108 """Respond with a message.""" 109 _LOGGER.debug( 110 _("Responding with: '%s' in room %s"), message.text, message.target 111 ) 112 channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name( 113 self.team_name, message.target 114 )["id"] 115 self.mm_driver.posts.create_post( 116 
options={"channel_id": channel_id, "message": message.text} 117 ) 118 [end of opsdroid/connector/mattermost/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py --- a/opsdroid/connector/mattermost/__init__.py +++ b/opsdroid/connector/mattermost/__init__.py @@ -39,6 +39,7 @@ self.mfa_token = None self.debug = False self.listening = True + self.bot_id = None self.mm_driver = Driver( { @@ -66,8 +67,7 @@ self.bot_id = login_response["id"] if "username" in login_response: self.bot_name = login_response["username"] - - _LOGGER.info(_("Connected as %s"), self.bot_name) + _LOGGER.info(_("Connected as %s"), self.bot_name) self.mm_driver.websocket = Websocket( self.mm_driver.options, self.mm_driver.client.token @@ -93,15 +93,18 @@ if "event" in message and message["event"] == "posted": data = message["data"] post = json.loads(data["post"]) - await self.opsdroid.parse( - Message( - text=post["message"], - user=data["sender_name"], - target=data["channel_name"], - connector=self, - raw_event=message, + # don't parse our own messages (https://github.com/opsdroid/opsdroid/issues/1775) + # (but also parse if somehow our bot_id is unknown, like in the unit tests) + if self.bot_id is None or self.bot_id != post["user_id"]: + await self.opsdroid.parse( + Message( + text=post["message"], + user=data["sender_name"], + target=data["channel_name"], + connector=self, + raw_event=message, + ) ) - ) @register_event(Message) async def send_message(self, message):
{"golden_diff": "diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py\n--- a/opsdroid/connector/mattermost/__init__.py\n+++ b/opsdroid/connector/mattermost/__init__.py\n@@ -39,6 +39,7 @@\n self.mfa_token = None\n self.debug = False\n self.listening = True\n+ self.bot_id = None\n \n self.mm_driver = Driver(\n {\n@@ -66,8 +67,7 @@\n self.bot_id = login_response[\"id\"]\n if \"username\" in login_response:\n self.bot_name = login_response[\"username\"]\n-\n- _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n+ _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n \n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n@@ -93,15 +93,18 @@\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n- await self.opsdroid.parse(\n- Message(\n- text=post[\"message\"],\n- user=data[\"sender_name\"],\n- target=data[\"channel_name\"],\n- connector=self,\n- raw_event=message,\n+ # don't parse our own messages (https://github.com/opsdroid/opsdroid/issues/1775)\n+ # (but also parse if somehow our bot_id is unknown, like in the unit tests)\n+ if self.bot_id is None or self.bot_id != post[\"user_id\"]:\n+ await self.opsdroid.parse(\n+ Message(\n+ text=post[\"message\"],\n+ user=data[\"sender_name\"],\n+ target=data[\"channel_name\"],\n+ connector=self,\n+ raw_event=message,\n+ )\n )\n- )\n \n @register_event(Message)\n async def send_message(self, message):\n", "issue": "Infinite self-responses in Mattermost connector\nAfter fixing the Mattermost connector with PR #1774 it turns out it suffers from the same infinite self-response problem (#1691) as was fixed for the Gitter connector in #1692.\n", "before_files": [{"content": "\"\"\"A connector for Mattermost.\"\"\"\nimport logging\nimport json\n\nfrom mattermostdriver import Driver, Websocket\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n Required(\"url\"): str,\n Required(\"team-name\"): str,\n \"scheme\": str,\n \"port\": int,\n \"ssl-verify\": bool,\n \"connect-timeout\": int,\n}\n\n\nclass ConnectorMattermost(Connector):\n \"\"\"A connector for Mattermost.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Mattermost connector\"))\n self.name = \"mattermost\"\n self.token = config[\"token\"]\n self.url = config[\"url\"]\n self.team_name = config[\"team-name\"]\n self.scheme = config.get(\"scheme\", \"https\")\n self.port = config.get(\"port\", 8065)\n self.verify = config.get(\"ssl-verify\", True)\n self.timeout = config.get(\"connect-timeout\", 30)\n self.request_timeout = None\n self.mfa_token = None\n self.debug = False\n self.listening = True\n\n self.mm_driver = Driver(\n {\n \"url\": self.url,\n \"token\": self.token,\n \"scheme\": self.scheme,\n \"port\": self.port,\n \"verify\": self.verify,\n \"timeout\": self.timeout,\n \"request_timeout\": self.request_timeout,\n \"mfa_token\": self.mfa_token,\n \"debug\": self.debug,\n }\n )\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Mattermost\"))\n\n login_response = self.mm_driver.login()\n\n _LOGGER.info(login_response)\n\n if \"id\" in login_response:\n self.bot_id = login_response[\"id\"]\n if \"username\" 
in login_response:\n self.bot_name = login_response[\"username\"]\n\n _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n\n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n )\n\n _LOGGER.info(_(\"Connected successfully\"))\n\n async def disconnect(self):\n \"\"\"Disconnect from Mattermost.\"\"\"\n self.listening = False\n self.mm_driver.logout()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n await self.mm_driver.websocket.connect(self.process_message)\n\n async def process_message(self, raw_message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n _LOGGER.info(raw_message)\n\n message = json.loads(raw_message)\n\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n text=post[\"message\"],\n user=data[\"sender_name\"],\n target=data[\"channel_name\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(\n self.team_name, message.target\n )[\"id\"]\n self.mm_driver.posts.create_post(\n options={\"channel_id\": channel_id, \"message\": message.text}\n )\n", "path": "opsdroid/connector/mattermost/__init__.py"}]}
1,637
441
gh_patches_debug_26810
rasdani/github-patches
git_diff
facebookresearch__fairscale-1108
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Lots of Commandline Output from this line. https://github.com/facebookresearch/fairscale/blob/2350968ee61a6f9ca6ecd24aba9db536e814a24c/fairscale/internal/version.py#L27 this warning appears a LOT in the commandline output when training. Can we remove it or place it in a place where it gets only shown once at the start? </issue> <code> [start of fairscale/internal/version.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 2 # 3 # This source code is licensed under the BSD license found in the 4 # LICENSE file in the root directory of this source tree. 5 6 import logging 7 import re 8 from typing import List, Tuple 9 10 import torch 11 12 __all__: List[str] = ["torch_version"] 13 14 15 def torch_version(version: str = torch.__version__) -> Tuple[int, ...]: 16 numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", version) 17 if not numbering: 18 return tuple() 19 # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`, 20 if numbering.group(4): 21 # Two options here: 22 # - either skip this version (minor number check is not relevant) 23 # - or check that our codebase is not broken by this ongoing development. 24 25 # Assuming that we're interested in the second use-case more than the first, 26 # return the pre-release or dev numbering 27 logging.warning(f"Pytorch pre-release version {version} - assuming intent to test it") 28 29 return tuple(int(numbering.group(n)) for n in range(1, 4)) 30 [end of fairscale/internal/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fairscale/internal/version.py b/fairscale/internal/version.py --- a/fairscale/internal/version.py +++ b/fairscale/internal/version.py @@ -11,13 +11,15 @@ __all__: List[str] = ["torch_version"] +_logged = False def torch_version(version: str = torch.__version__) -> Tuple[int, ...]: numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", version) if not numbering: return tuple() # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`, - if numbering.group(4): + global _logged + if numbering.group(4) and not _logged: # Two options here: # - either skip this version (minor number check is not relevant) # - or check that our codebase is not broken by this ongoing development. @@ -25,5 +27,6 @@ # Assuming that we're interested in the second use-case more than the first, # return the pre-release or dev numbering logging.warning(f"Pytorch pre-release version {version} - assuming intent to test it") + _logged = True return tuple(int(numbering.group(n)) for n in range(1, 4))
{"golden_diff": "diff --git a/fairscale/internal/version.py b/fairscale/internal/version.py\n--- a/fairscale/internal/version.py\n+++ b/fairscale/internal/version.py\n@@ -11,13 +11,15 @@\n \n __all__: List[str] = [\"torch_version\"]\n \n+_logged = False\n \n def torch_version(version: str = torch.__version__) -> Tuple[int, ...]:\n numbering = re.search(r\"^(\\d+).(\\d+).(\\d+)([^\\+]*)(\\+\\S*)?$\", version)\n if not numbering:\n return tuple()\n # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,\n- if numbering.group(4):\n+ global _logged\n+ if numbering.group(4) and not _logged:\n # Two options here:\n # - either skip this version (minor number check is not relevant)\n # - or check that our codebase is not broken by this ongoing development.\n@@ -25,5 +27,6 @@\n # Assuming that we're interested in the second use-case more than the first,\n # return the pre-release or dev numbering\n logging.warning(f\"Pytorch pre-release version {version} - assuming intent to test it\")\n+ _logged = True\n \n return tuple(int(numbering.group(n)) for n in range(1, 4))\n", "issue": "Lots of Commandline Output from this line.\n\r\nhttps://github.com/facebookresearch/fairscale/blob/2350968ee61a6f9ca6ecd24aba9db536e814a24c/fairscale/internal/version.py#L27\r\n\r\nthis warning appears a LOT in the commandline output when training. Can we remove it or place it in a place where it gets only shown once at the start?\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport re\nfrom typing import List, Tuple\n\nimport torch\n\n__all__: List[str] = [\"torch_version\"]\n\n\ndef torch_version(version: str = torch.__version__) -> Tuple[int, ...]:\n numbering = re.search(r\"^(\\d+).(\\d+).(\\d+)([^\\+]*)(\\+\\S*)?$\", version)\n if not numbering:\n return tuple()\n # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,\n if numbering.group(4):\n # Two options here:\n # - either skip this version (minor number check is not relevant)\n # - or check that our codebase is not broken by this ongoing development.\n\n # Assuming that we're interested in the second use-case more than the first,\n # return the pre-release or dev numbering\n logging.warning(f\"Pytorch pre-release version {version} - assuming intent to test it\")\n\n return tuple(int(numbering.group(n)) for n in range(1, 4))\n", "path": "fairscale/internal/version.py"}]}
960
301
gh_patches_debug_13256
rasdani/github-patches
git_diff
openvinotoolkit__datumaro-278
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test errors after tensorflow installation on Ubuntu 20.04 Ubuntu20.04, Python 3.8.5 Installation of tensorflow (to enable skipped tests) results with tests errors. **Steps to reproduce:** ``` git clone https://github.com/openvinotoolkit/datumaro cd datumaro python3 -m pip install virtualenv python3 -m virtualenv venv . venv/bin/activate pip install datumaro python3 -m unittest -v //there are some skipped tests - required tensorflow and pandas) pip install tensorflow //during installation numpy 1.20.3 was uninstalled and 1.19.5 was installed python3 -m unittest -v ``` **Expected result:** No test errors after installation libraries required to perform initially skipped tests. **Current result:** ``` Ran 390 tests in 11.807s FAILED (errors=29, skipped=7) ``` ```====================================================================== ERROR: test_validate_annotations_segmentation (tests.test_validator.TestValidateAnnotations) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/sstrehlk/src/datum_p/tests/test_validator.py", line 803, in test_validate_annotations_segmentation actual_results = validate_annotations(self.dataset, 'segmentation', File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1255, in validate_annotations stats = validator.compute_statistics(dataset) File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1064, in compute_statistics _update_mask_stats_by_label( File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1026, in _update_mask_stats_by_label area = ann.get_area() File "/home/sstrehlk/src/datum_p/datumaro/components/extractor.py", line 374, in get_area import pycocotools.mask as mask_utils File "/home/sstrehlk/src/datum_p/venv/lib/python3.8/site-packages/pycocotools/mask.py", line 3, in <module> import pycocotools._mask as _mask File "pycocotools/_mask.pyx", line 1, in init pycocotools._mask ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject ---------------------------------------------------------------------- ``` It seems that there is incompatibility between numpy 1.19.5 and pycocotools 2.0.2. There is workaround for it: ``` pip install pycocotools==2.0.0 ``` </issue> <code> [start of setup.py] 1 2 # Copyright (C) 2019-2020 Intel Corporation 3 # 4 # SPDX-License-Identifier: MIT 5 6 from distutils.util import strtobool 7 import os 8 import os.path as osp 9 import re 10 import setuptools 11 12 # Snyk scan integration 13 here = None 14 15 16 def find_version(project_dir=None): 17 if not project_dir: 18 project_dir = osp.dirname(osp.abspath(__file__)) 19 20 file_path = osp.join(project_dir, 'datumaro', 'version.py') 21 22 with open(file_path, 'r') as version_file: 23 version_text = version_file.read() 24 25 # PEP440: 26 # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions 27 pep_regex = r'([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?' 28 version_regex = r'VERSION\s*=\s*.(' + pep_regex + ').' 
29 match = re.match(version_regex, version_text) 30 if not match: 31 raise RuntimeError("Failed to find version string in '%s'" % file_path) 32 33 version = version_text[match.start(1) : match.end(1)] 34 return version 35 36 def get_requirements(): 37 requirements = [ 38 'attrs>=19.3.0', 39 'defusedxml', 40 'GitPython', 41 'lxml', 42 'matplotlib', 43 'numpy>=1.17.3', 44 'Pillow', 45 'pycocotools; platform_system != "Windows"', 46 'pycocotools-windows; platform_system == "Windows"', 47 'PyYAML', 48 'scikit-image', 49 'tensorboardX', 50 ] 51 if strtobool(os.getenv('DATUMARO_HEADLESS', '0').lower()): 52 requirements.append('opencv-python-headless') 53 else: 54 requirements.append('opencv-python') 55 56 return requirements 57 58 with open('README.md', 'r') as fh: 59 long_description = fh.read() 60 61 setuptools.dist.Distribution().fetch_build_eggs([ 62 'Cython>=0.27.3' # required for pycocotools and others, if need to compile 63 ]) 64 65 setuptools.setup( 66 name="datumaro", 67 version=find_version(here), 68 author="Intel", 69 author_email="[email protected]", 70 description="Dataset Management Framework (Datumaro)", 71 long_description=long_description, 72 long_description_content_type="text/markdown", 73 url="https://github.com/openvinotoolkit/datumaro", 74 packages=setuptools.find_packages(exclude=['tests*']), 75 classifiers=[ 76 "Programming Language :: Python :: 3", 77 "License :: OSI Approved :: MIT License", 78 "Operating System :: OS Independent", 79 ], 80 python_requires='>=3.6', 81 install_requires=get_requirements(), 82 extras_require={ 83 'tf': ['tensorflow'], 84 'tf-gpu': ['tensorflow-gpu'], 85 }, 86 entry_points={ 87 'console_scripts': [ 88 'datum=datumaro.cli.__main__:main', 89 ], 90 }, 91 ) 92 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -42,8 +42,17 @@ 'matplotlib', 'numpy>=1.17.3', 'Pillow', - 'pycocotools; platform_system != "Windows"', + + # Avoid 2.0.2 Linux binary distribution because of + # a conflict in numpy versions with TensorFlow: + # - TF is compiled with numpy 1.19 ABI + # - pycocotools is compiled with numpy 1.20 ABI + # Using a previous version allows to force package rebuilding. + # + # https://github.com/openvinotoolkit/datumaro/issues/253 + 'pycocotools!=2.0.2; platform_system != "Windows"', 'pycocotools-windows; platform_system == "Windows"', + 'PyYAML', 'scikit-image', 'tensorboardX',
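As a quick illustration of how the new `pycocotools!=2.0.2` constraint behaves, the sketch below (added here for clarity; it is not part of the datumaro repository and assumes the `packaging` library is available) evaluates a few concrete versions against the specifier:

```python
# Illustration only: how the "!=2.0.2" specifier from the patch classifies
# concrete pycocotools releases. `packaging` ships alongside modern pip/setuptools,
# but it is an assumption here, not a dependency declared in this setup.py.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("!=2.0.2")
print("2.0.0" in spec)  # True  -> the known-good fallback from the issue stays installable
print("2.0.2" in spec)  # False -> the binary build that clashes with TensorFlow's numpy is excluded
print("2.0.4" in spec)  # True  -> later releases are not blocked by the pin
```

Avoiding the 2.0.2 Linux wheel effectively forces a source build, so pycocotools compiles against whichever numpy is already installed, which is the compatibility property the issue asks for.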
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,8 +42,17 @@\n 'matplotlib',\n 'numpy>=1.17.3',\n 'Pillow',\n- 'pycocotools; platform_system != \"Windows\"',\n+\n+ # Avoid 2.0.2 Linux binary distribution because of\n+ # a conflict in numpy versions with TensorFlow:\n+ # - TF is compiled with numpy 1.19 ABI\n+ # - pycocotools is compiled with numpy 1.20 ABI\n+ # Using a previous version allows to force package rebuilding.\n+ #\n+ # https://github.com/openvinotoolkit/datumaro/issues/253\n+ 'pycocotools!=2.0.2; platform_system != \"Windows\"',\n 'pycocotools-windows; platform_system == \"Windows\"',\n+\n 'PyYAML',\n 'scikit-image',\n 'tensorboardX',\n", "issue": "Test errors after tensorflow installation on Ubuntu 20.04\nUbuntu20.04, Python 3.8.5 \r\nInstallation of tensorflow (to enable skipped tests) results with tests errors.\r\n\r\n**Steps to reproduce:**\r\n```\r\ngit clone https://github.com/openvinotoolkit/datumaro \r\ncd datumaro\r\npython3 -m pip install virtualenv\r\npython3 -m virtualenv venv\r\n. venv/bin/activate\r\npip install datumaro\r\npython3 -m unittest -v\r\n//there are some skipped tests - required tensorflow and pandas)\r\npip install tensorflow\r\n//during installation numpy 1.20.3 was uninstalled and 1.19.5 was installed\r\npython3 -m unittest -v\r\n```\r\n**Expected result:**\r\nNo test errors after installation libraries required to perform initially skipped tests.\r\n\r\n**Current result:**\r\n```\r\nRan 390 tests in 11.807s\r\n\r\nFAILED (errors=29, skipped=7)\r\n```\r\n```======================================================================\r\nERROR: test_validate_annotations_segmentation (tests.test_validator.TestValidateAnnotations)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/sstrehlk/src/datum_p/tests/test_validator.py\", line 803, in test_validate_annotations_segmentation\r\n actual_results = validate_annotations(self.dataset, 'segmentation',\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1255, in validate_annotations\r\n stats = validator.compute_statistics(dataset)\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1064, in compute_statistics\r\n _update_mask_stats_by_label(\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1026, in _update_mask_stats_by_label\r\n area = ann.get_area()\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/extractor.py\", line 374, in get_area\r\n import pycocotools.mask as mask_utils\r\n File \"/home/sstrehlk/src/datum_p/venv/lib/python3.8/site-packages/pycocotools/mask.py\", line 3, in <module>\r\n import pycocotools._mask as _mask\r\n File \"pycocotools/_mask.pyx\", line 1, in init pycocotools._mask\r\nValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject\r\n\r\n----------------------------------------------------------------------\r\n```\r\nIt seems that there is incompatibility between numpy 1.19.5 and pycocotools 2.0.2. 
There is workaround for it: \r\n```\r\npip install pycocotools==2.0.0\r\n``` \n", "before_files": [{"content": "\n# Copyright (C) 2019-2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom distutils.util import strtobool\nimport os\nimport os.path as osp\nimport re\nimport setuptools\n\n# Snyk scan integration\nhere = None\n\n\ndef find_version(project_dir=None):\n if not project_dir:\n project_dir = osp.dirname(osp.abspath(__file__))\n\n file_path = osp.join(project_dir, 'datumaro', 'version.py')\n\n with open(file_path, 'r') as version_file:\n version_text = version_file.read()\n\n # PEP440:\n # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions\n pep_regex = r'([1-9]\\d*!)?(0|[1-9]\\d*)(\\.(0|[1-9]\\d*))*((a|b|rc)(0|[1-9]\\d*))?(\\.post(0|[1-9]\\d*))?(\\.dev(0|[1-9]\\d*))?'\n version_regex = r'VERSION\\s*=\\s*.(' + pep_regex + ').'\n match = re.match(version_regex, version_text)\n if not match:\n raise RuntimeError(\"Failed to find version string in '%s'\" % file_path)\n\n version = version_text[match.start(1) : match.end(1)]\n return version\n\ndef get_requirements():\n requirements = [\n 'attrs>=19.3.0',\n 'defusedxml',\n 'GitPython',\n 'lxml',\n 'matplotlib',\n 'numpy>=1.17.3',\n 'Pillow',\n 'pycocotools; platform_system != \"Windows\"',\n 'pycocotools-windows; platform_system == \"Windows\"',\n 'PyYAML',\n 'scikit-image',\n 'tensorboardX',\n ]\n if strtobool(os.getenv('DATUMARO_HEADLESS', '0').lower()):\n requirements.append('opencv-python-headless')\n else:\n requirements.append('opencv-python')\n\n return requirements\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetuptools.dist.Distribution().fetch_build_eggs([\n 'Cython>=0.27.3' # required for pycocotools and others, if need to compile\n])\n\nsetuptools.setup(\n name=\"datumaro\",\n version=find_version(here),\n author=\"Intel\",\n author_email=\"[email protected]\",\n description=\"Dataset Management Framework (Datumaro)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/openvinotoolkit/datumaro\",\n packages=setuptools.find_packages(exclude=['tests*']),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=get_requirements(),\n extras_require={\n 'tf': ['tensorflow'],\n 'tf-gpu': ['tensorflow-gpu'],\n },\n entry_points={\n 'console_scripts': [\n 'datum=datumaro.cli.__main__:main',\n ],\n },\n)\n", "path": "setup.py"}]}
2,044
222
gh_patches_debug_28710
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-1639
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug]: Waste type not showing for City of Doncaster (Green Bin Collection) ### I Have A Problem With: A specific source, The integration in general ### What's Your Problem For Doncaster Council the Green bin collection only runs 9months out of 12. Next collection is in early March which can be viewed via the Council bin look-up calendar, but the waste type is not returned on the integration, it only seems to scrape 3 weeks in advance, can this be changed? ### Source (if relevant) doncaster_gov_uk ### Logs ```Shell no relevant logs ``` ### Relevant Configuration ```YAML waste_collection_schedule: sources: - name: doncaster_gov_uk args: uprn: "xxxxxxx" - platform: waste_collection_schedule name: Bins details_format: appointment_types leadtime: 90 # value_template: VALUE_TEMPLATE # date_template: DATE_TEMPLATE add_days_to: true # event_index: EVENT_INDEX ``` ### Checklist Source Error - [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration) - [X] Checked that the website of your service provider is still working - [ ] Tested my attributes on the service provider website (if possible) - [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version) ### Checklist Sensor Error - [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used) ### Required - [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been. - [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate. </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py] 1 import re 2 import requests 3 import json 4 from datetime import datetime, timedelta 5 from waste_collection_schedule import Collection # type: ignore[attr-defined] 6 7 TITLE = "City of Doncaster Council" 8 DESCRIPTION = "Source for doncaster.gov.uk services for the City of Doncaster Council, UK." 
9 URL = "https://doncaster.gov.uk" 10 11 TEST_CASES = { 12 "Test_001": {"uprn": "100050701118"}, 13 "Test_002": {"uprn": "100050753396"}, 14 "Test_003": {"uprn": 100050699118}, 15 } 16 17 ICON_MAP = { 18 "GREEN": "mdi:leaf", 19 "RECYCLING": "mdi:recycle", 20 "BLACK": "mdi:trash-can", 21 "BULKY": "mdi:fridge", 22 "RE-USE": "mdi:sofa", 23 } 24 25 REGEX_DATE = r"\(([0-9]{10})" 26 27 28 class Source: 29 def __init__(self, uprn): 30 self._uprn = str(uprn).zfill(12) 31 32 33 def fetch(self): 34 35 # Query needs start and end epoch dates 36 today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) 37 start = (today - timedelta(weeks=3)).strftime("%s") 38 end = (today + timedelta(weeks=3)).strftime("%s") 39 url = f"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}" 40 # start = start.strftime("%s") 41 # end = end.strftime("%s") 42 43 s = requests.Session() 44 r = s.get(url) 45 data = json.loads(r.text) 46 47 entries = [] 48 49 for entry in data["slots"]: 50 waste_type = entry["title"] 51 waste_date = entry["end"] 52 epoch = re.findall(REGEX_DATE, waste_date) 53 waste_date = datetime.fromtimestamp(int(epoch[0])).date() 54 entries.append( 55 Collection( 56 date=waste_date, 57 t=waste_type, 58 icon=ICON_MAP.get(waste_type.upper()), 59 ) 60 ) 61 62 return entries 63 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py @@ -1,11 +1,14 @@ -import re -import requests import json +import re from datetime import datetime, timedelta + +import requests from waste_collection_schedule import Collection # type: ignore[attr-defined] TITLE = "City of Doncaster Council" -DESCRIPTION = "Source for doncaster.gov.uk services for the City of Doncaster Council, UK." +DESCRIPTION = ( + "Source for doncaster.gov.uk services for the City of Doncaster Council, UK." +) URL = "https://doncaster.gov.uk" TEST_CASES = { @@ -29,13 +32,11 @@ def __init__(self, uprn): self._uprn = str(uprn).zfill(12) - def fetch(self): - - # Query needs start and end epoch dates + # Query needs start and end epoch dates today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) - start = (today - timedelta(weeks=3)).strftime("%s") - end = (today + timedelta(weeks=3)).strftime("%s") + start = (today - timedelta(days=365)).strftime("%s") + end = (today + timedelta(days=365)).strftime("%s") url = f"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}" # start = start.strftime("%s") # end = end.strftime("%s")
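For readers trying the wider window outside Home Assistant, here is a small stand-alone sketch of the same ±365-day epoch calculation; it is not repository code, it uses `datetime.timestamp()` instead of the platform-specific `"%s"` format code from the source, and it substitutes a placeholder UPRN:

```python
# Stand-alone sketch (not from the source): build the same GetCollectionsForCalendar
# query the integration issues, but spanning a full year in each direction.
from datetime import datetime, timedelta

today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
start = int((today - timedelta(days=365)).timestamp())  # epoch seconds, portable
end = int((today + timedelta(days=365)).timestamp())
uprn = "000000000000"  # placeholder, not a real UPRN
url = (
    "https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar"
    f"?UPRN={uprn}&Start={start}&End={end}"
)
print(url)
```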
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n@@ -1,11 +1,14 @@\n-import re\n-import requests\n import json\n+import re\n from datetime import datetime, timedelta\n+\n+import requests\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n \n TITLE = \"City of Doncaster Council\"\n-DESCRIPTION = \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\n+DESCRIPTION = (\n+ \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\n+)\n URL = \"https://doncaster.gov.uk\"\n \n TEST_CASES = {\n@@ -29,13 +32,11 @@\n def __init__(self, uprn):\n self._uprn = str(uprn).zfill(12)\n \n-\n def fetch(self):\n-\n- # Query needs start and end epoch dates \n+ # Query needs start and end epoch dates\n today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n- start = (today - timedelta(weeks=3)).strftime(\"%s\")\n- end = (today + timedelta(weeks=3)).strftime(\"%s\")\n+ start = (today - timedelta(days=365)).strftime(\"%s\")\n+ end = (today + timedelta(days=365)).strftime(\"%s\")\n url = f\"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}\"\n # start = start.strftime(\"%s\")\n # end = end.strftime(\"%s\")\n", "issue": "[Bug]: Waste type not showing for City of Doncaster (Green Bin Collection)\n### I Have A Problem With:\n\nA specific source, The integration in general\n\n### What's Your Problem\n\nFor Doncaster Council the Green bin collection only runs 9months out of 12. Next collection is in early March which can be viewed via the Council bin look-up calendar, but the waste type is not returned on the integration, it only seems to scrape 3 weeks in advance, can this be changed? 
\n\n### Source (if relevant)\n\ndoncaster_gov_uk\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: doncaster_gov_uk\r\n args:\r\n uprn: \"xxxxxxx\"\r\n\r\n - platform: waste_collection_schedule\r\n name: Bins\r\n details_format: appointment_types\r\n leadtime: 90\r\n# value_template: VALUE_TEMPLATE\r\n# date_template: DATE_TEMPLATE\r\n add_days_to: true\r\n# event_index: EVENT_INDEX\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [ ] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import re\nimport requests\nimport json\nfrom datetime import datetime, timedelta\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"City of Doncaster Council\"\nDESCRIPTION = \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\nURL = \"https://doncaster.gov.uk\"\n\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100050701118\"},\n \"Test_002\": {\"uprn\": \"100050753396\"},\n \"Test_003\": {\"uprn\": 100050699118},\n}\n\nICON_MAP = {\n \"GREEN\": \"mdi:leaf\",\n \"RECYCLING\": \"mdi:recycle\",\n \"BLACK\": \"mdi:trash-can\",\n \"BULKY\": \"mdi:fridge\",\n \"RE-USE\": \"mdi:sofa\",\n}\n\nREGEX_DATE = r\"\\(([0-9]{10})\"\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn).zfill(12)\n\n\n def fetch(self):\n\n # Query needs start and end epoch dates \n today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n start = (today - timedelta(weeks=3)).strftime(\"%s\")\n end = (today + timedelta(weeks=3)).strftime(\"%s\")\n url = f\"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}\"\n # start = start.strftime(\"%s\")\n # end = end.strftime(\"%s\")\n\n s = requests.Session()\n r = s.get(url)\n data = json.loads(r.text)\n\n entries = []\n\n for entry in data[\"slots\"]:\n waste_type = entry[\"title\"]\n waste_date = entry[\"end\"]\n epoch = re.findall(REGEX_DATE, waste_date)\n waste_date = datetime.fromtimestamp(int(epoch[0])).date()\n entries.append(\n Collection(\n date=waste_date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py"}]}
1,631
436
gh_patches_debug_1937
rasdani/github-patches
git_diff
ivy-llc__ivy-23588
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ifft2 </issue> <code> [start of ivy/functional/frontends/jax/numpy/fft.py] 1 # local 2 import ivy 3 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back 4 from ivy.func_wrapper import with_unsupported_dtypes 5 6 7 @to_ivy_arrays_and_back 8 def fft(a, n=None, axis=-1, norm=None): 9 if norm is None: 10 norm = "backward" 11 return ivy.fft(a, axis, norm=norm, n=n) 12 13 14 @to_ivy_arrays_and_back 15 def fft2(a, s=None, axes=(-2, -1), norm=None): 16 if norm is None: 17 norm = "backward" 18 return ivy.array(ivy.fft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a)) 19 20 21 @to_ivy_arrays_and_back 22 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 23 def fftshift(x, axes=None, name=None): 24 shape = x.shape 25 26 if axes is None: 27 axes = tuple(range(x.ndim)) 28 shifts = [(dim // 2) for dim in shape] 29 elif isinstance(axes, int): 30 shifts = shape[axes] // 2 31 else: 32 shifts = [shape[ax] // 2 for ax in axes] 33 34 roll = ivy.roll(x, shifts, axis=axes) 35 36 return roll 37 38 39 @to_ivy_arrays_and_back 40 def ifft(a, n=None, axis=-1, norm=None): 41 if norm is None: 42 norm = "backward" 43 return ivy.ifft(a, axis, norm=norm, n=n) 44 [end of ivy/functional/frontends/jax/numpy/fft.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py --- a/ivy/functional/frontends/jax/numpy/fft.py +++ b/ivy/functional/frontends/jax/numpy/fft.py @@ -41,3 +41,10 @@ if norm is None: norm = "backward" return ivy.ifft(a, axis, norm=norm, n=n) + + +@to_ivy_arrays_and_back +def ifft2(a, s=None, axes=(-2, -1), norm=None): + if norm is None: + norm = "backward" + return ivy.array(ivy.ifft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))
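Since the issue text is only the function name, a short NumPy round trip (illustrative only, not Ivy code) spells out the behaviour the new `ifft2` frontend is expected to mirror:

```python
# Reference behaviour for ifft2 (NumPy used purely for illustration):
# applying ifft2 after fft2 recovers the input up to floating-point error.
import numpy as np

a = np.arange(12.0).reshape(3, 4)
round_trip = np.fft.ifft2(np.fft.fft2(a))
assert np.allclose(round_trip.real, a)
print(round_trip.real.round(6))
```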
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py\n--- a/ivy/functional/frontends/jax/numpy/fft.py\n+++ b/ivy/functional/frontends/jax/numpy/fft.py\n@@ -41,3 +41,10 @@\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n+\n+\n+@to_ivy_arrays_and_back\n+def ifft2(a, s=None, axes=(-2, -1), norm=None):\n+ if norm is None:\n+ norm = \"backward\"\n+ return ivy.array(ivy.ifft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))\n", "issue": " ifft2\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.fft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\ndef fft2(a, s=None, axes=(-2, -1), norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.array(ivy.fft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = [shape[ax] // 2 for ax in axes]\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n", "path": "ivy/functional/frontends/jax/numpy/fft.py"}]}
991
182
gh_patches_debug_37299
rasdani/github-patches
git_diff
evennia__evennia-1725
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> develop: Website not correctly logging in when logged in in-game #### Brief summary of issue / Description of requested feature: When logging in in-game, the website does not detect it, but if you try to log in you still get a warning saying that you are already logged in. #### Steps to reproduce the issue / Reasons for adding feature: 1. Don't log into the website but open the web client 2. Log into the webclient as usual. 3. Go back to the website - you are not shown as logged in, but clicking `Log in` will still give you an error. #### Error output / Expected result of feature When logged into the game, this should be reflected by the web site. See closed #1063. #### Extra information, such as Evennia revision/repo/branch, operating system and ideas for how to solve / implement: This is a regression, probably from changes in the session handling/sharing between client and website. </issue> <code> [start of evennia/web/utils/middleware.py] 1 from django.contrib.auth import authenticate, login 2 from evennia.accounts.models import AccountDB 3 from evennia.utils import logger 4 5 class SharedLoginMiddleware(object): 6 """ 7 Handle the shared login between website and webclient. 8 9 """ 10 def __init__(self, get_response): 11 # One-time configuration and initialization. 12 self.get_response = get_response 13 14 def __call__(self, request): 15 # Code to be executed for each request before 16 # the view (and later middleware) are called. 17 18 # Process view 19 response = self.get_response(request) 20 21 # Code to be executed for each request/response after 22 # the view is called. 23 24 # Synchronize credentials 25 self.make_shared_login(request) 26 27 # Return processed view 28 return response 29 30 @classmethod 31 def make_shared_login(cls, request): 32 csession = request.session 33 account = request.user 34 website_uid = csession.get("website_authenticated_uid", None) 35 webclient_uid = csession.get("webclient_authenticated_uid", None) 36 37 if not csession.session_key: 38 # this is necessary to build the sessid key 39 csession.save() 40 41 if account.is_authenticated(): 42 # Logged into website 43 if not website_uid: 44 # fresh website login (just from login page) 45 csession["website_authenticated_uid"] = account.id 46 if webclient_uid is None: 47 # auto-login web client 48 csession["webclient_authenticated_uid"] = account.id 49 50 elif webclient_uid: 51 # Not logged into website, but logged into webclient 52 if not website_uid: 53 csession["website_authenticated_uid"] = account.id 54 account = AccountDB.objects.get(id=webclient_uid) 55 try: 56 # calls our custom authenticate, in web/utils/backend.py 57 authenticate(autologin=account) 58 login(request, account) 59 except AttributeError: 60 logger.log_trace() [end of evennia/web/utils/middleware.py] [start of evennia/web/webclient/views.py] 1 2 """ 3 This contains a simple view for rendering the webclient 4 page and serve it eventual static content. 5 6 """ 7 from __future__ import print_function 8 from django.shortcuts import render 9 from django.contrib.auth import login, authenticate 10 11 from evennia.accounts.models import AccountDB 12 from evennia.utils import logger 13 14 15 def _shared_login(request): 16 """ 17 Handle the shared login between website and webclient. 
18 19 """ 20 csession = request.session 21 account = request.user 22 # these can have 3 values: 23 # None - previously unused (auto-login) 24 # False - actively logged out (don't auto-login) 25 # <uid> - logged in User/Account id 26 website_uid = csession.get("website_authenticated_uid", None) 27 webclient_uid = csession.get("webclient_authenticated_uid", None) 28 29 # check if user has authenticated to website 30 if not csession.session_key: 31 # this is necessary to build the sessid key 32 csession.save() 33 34 if webclient_uid: 35 # The webclient has previously registered a login to this browser_session 36 if not account.is_authenticated() and not website_uid: 37 try: 38 account = AccountDB.objects.get(id=webclient_uid) 39 except AccountDB.DoesNotExist: 40 # this can happen e.g. for guest accounts or deletions 41 csession["website_authenticated_uid"] = False 42 csession["webclient_authenticated_uid"] = False 43 return 44 try: 45 # calls our custom authenticate in web/utils/backends.py 46 account = authenticate(autologin=account) 47 login(request, account) 48 csession["website_authenticated_uid"] = webclient_uid 49 except AttributeError: 50 logger.log_trace() 51 52 53 def webclient(request): 54 """ 55 Webclient page template loading. 56 57 """ 58 # handle webclient-website shared login 59 _shared_login(request) 60 61 # make sure to store the browser session's hash so the webclient can get to it! 62 pagevars = {'browser_sessid': request.session.session_key} 63 64 return render(request, 'webclient.html', pagevars) 65 [end of evennia/web/webclient/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evennia/web/utils/middleware.py b/evennia/web/utils/middleware.py --- a/evennia/web/utils/middleware.py +++ b/evennia/web/utils/middleware.py @@ -15,15 +15,16 @@ # Code to be executed for each request before # the view (and later middleware) are called. + # Synchronize credentials between webclient and website + # Must be performed *before* rendering the view (issue #1723) + self.make_shared_login(request) + # Process view response = self.get_response(request) # Code to be executed for each request/response after # the view is called. - # Synchronize credentials - self.make_shared_login(request) - # Return processed view return response diff --git a/evennia/web/webclient/views.py b/evennia/web/webclient/views.py --- a/evennia/web/webclient/views.py +++ b/evennia/web/webclient/views.py @@ -12,52 +12,13 @@ from evennia.utils import logger -def _shared_login(request): - """ - Handle the shared login between website and webclient. - - """ - csession = request.session - account = request.user - # these can have 3 values: - # None - previously unused (auto-login) - # False - actively logged out (don't auto-login) - # <uid> - logged in User/Account id - website_uid = csession.get("website_authenticated_uid", None) - webclient_uid = csession.get("webclient_authenticated_uid", None) - - # check if user has authenticated to website - if not csession.session_key: - # this is necessary to build the sessid key - csession.save() - - if webclient_uid: - # The webclient has previously registered a login to this browser_session - if not account.is_authenticated() and not website_uid: - try: - account = AccountDB.objects.get(id=webclient_uid) - except AccountDB.DoesNotExist: - # this can happen e.g. for guest accounts or deletions - csession["website_authenticated_uid"] = False - csession["webclient_authenticated_uid"] = False - return - try: - # calls our custom authenticate in web/utils/backends.py - account = authenticate(autologin=account) - login(request, account) - csession["website_authenticated_uid"] = webclient_uid - except AttributeError: - logger.log_trace() - - def webclient(request): """ Webclient page template loading. """ - # handle webclient-website shared login - _shared_login(request) - + # auto-login is now handled by evennia.web.utils.middleware + # make sure to store the browser session's hash so the webclient can get to it! pagevars = {'browser_sessid': request.session.session_key}
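The heart of the fix is ordering, which a plain-Python sketch (no Django or Evennia imports; every name below is an invented stand-in) makes easy to see: only work done before `get_response()` can influence what the website view renders.

```python
# Minimal ordering sketch; FakeRequest, sync_credentials and view are
# stand-ins invented for this example, not Evennia or Django APIs.
class FakeRequest:
    synced = False

def sync_credentials(request):
    """Plays the role of SharedLoginMiddleware.make_shared_login()."""
    request.synced = True

def view(request):
    """Plays the role of the website view/template rendering."""
    return f"page shows logged in: {request.synced}"

class SharedLoginSketch:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        sync_credentials(request)              # before the view -> the page reflects it
        response = self.get_response(request)
        return response                        # syncing here instead would be too late

print(SharedLoginSketch(view)(FakeRequest()))  # -> "page shows logged in: True"
```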
{"golden_diff": "diff --git a/evennia/web/utils/middleware.py b/evennia/web/utils/middleware.py\n--- a/evennia/web/utils/middleware.py\n+++ b/evennia/web/utils/middleware.py\n@@ -15,15 +15,16 @@\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n \n+ # Synchronize credentials between webclient and website\n+ # Must be performed *before* rendering the view (issue #1723)\n+ self.make_shared_login(request)\n+ \n # Process view\n response = self.get_response(request)\n \n # Code to be executed for each request/response after\n # the view is called.\n \n- # Synchronize credentials\n- self.make_shared_login(request)\n- \n # Return processed view\n return response\n \ndiff --git a/evennia/web/webclient/views.py b/evennia/web/webclient/views.py\n--- a/evennia/web/webclient/views.py\n+++ b/evennia/web/webclient/views.py\n@@ -12,52 +12,13 @@\n from evennia.utils import logger\n \n \n-def _shared_login(request):\n- \"\"\"\n- Handle the shared login between website and webclient.\n-\n- \"\"\"\n- csession = request.session\n- account = request.user\n- # these can have 3 values:\n- # None - previously unused (auto-login)\n- # False - actively logged out (don't auto-login)\n- # <uid> - logged in User/Account id\n- website_uid = csession.get(\"website_authenticated_uid\", None)\n- webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n-\n- # check if user has authenticated to website\n- if not csession.session_key:\n- # this is necessary to build the sessid key\n- csession.save()\n-\n- if webclient_uid:\n- # The webclient has previously registered a login to this browser_session\n- if not account.is_authenticated() and not website_uid:\n- try:\n- account = AccountDB.objects.get(id=webclient_uid)\n- except AccountDB.DoesNotExist:\n- # this can happen e.g. for guest accounts or deletions\n- csession[\"website_authenticated_uid\"] = False\n- csession[\"webclient_authenticated_uid\"] = False\n- return\n- try:\n- # calls our custom authenticate in web/utils/backends.py\n- account = authenticate(autologin=account)\n- login(request, account)\n- csession[\"website_authenticated_uid\"] = webclient_uid\n- except AttributeError:\n- logger.log_trace()\n-\n-\n def webclient(request):\n \"\"\"\n Webclient page template loading.\n \n \"\"\"\n- # handle webclient-website shared login\n- _shared_login(request)\n-\n+ # auto-login is now handled by evennia.web.utils.middleware\n+ \n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n", "issue": "develop: Website not correctly logging in when logged in in-game\n#### Brief summary of issue / Description of requested feature:\r\n\r\nWhen logging in in-game, the website does not detect it, but if you try to log in you still get a warning saying that you are already logged in.\r\n\r\n#### Steps to reproduce the issue / Reasons for adding feature:\r\n\r\n1. Don't log into the website but open the web client\r\n2. Log into the webclient as usual.\r\n3. Go back to the website - you are not shown as logged in, but clicking `Log in` will still give you an error. \r\n\r\n#### Error output / Expected result of feature\r\n\r\nWhen logged into the game, this should be reflected by the web site. See closed #1063. 
\r\n\r\n#### Extra information, such as Evennia revision/repo/branch, operating system and ideas for how to solve / implement:\r\n\r\nThis is a regression, probably from changes in the session handling/sharing between client and website.\n", "before_files": [{"content": "from django.contrib.auth import authenticate, login\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\nclass SharedLoginMiddleware(object):\n \"\"\"\n Handle the shared login between website and webclient.\n\n \"\"\"\n def __init__(self, get_response):\n # One-time configuration and initialization.\n self.get_response = get_response\n \n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n \n # Process view\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n \n # Synchronize credentials\n self.make_shared_login(request)\n \n # Return processed view\n return response\n \n @classmethod\n def make_shared_login(cls, request):\n csession = request.session\n account = request.user\n website_uid = csession.get(\"website_authenticated_uid\", None)\n webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n \n if not csession.session_key:\n # this is necessary to build the sessid key\n csession.save()\n \n if account.is_authenticated():\n # Logged into website\n if not website_uid:\n # fresh website login (just from login page)\n csession[\"website_authenticated_uid\"] = account.id\n if webclient_uid is None:\n # auto-login web client\n csession[\"webclient_authenticated_uid\"] = account.id\n \n elif webclient_uid:\n # Not logged into website, but logged into webclient\n if not website_uid:\n csession[\"website_authenticated_uid\"] = account.id\n account = AccountDB.objects.get(id=webclient_uid)\n try:\n # calls our custom authenticate, in web/utils/backend.py\n authenticate(autologin=account)\n login(request, account)\n except AttributeError:\n logger.log_trace()", "path": "evennia/web/utils/middleware.py"}, {"content": "\n\"\"\"\nThis contains a simple view for rendering the webclient\npage and serve it eventual static content.\n\n\"\"\"\nfrom __future__ import print_function\nfrom django.shortcuts import render\nfrom django.contrib.auth import login, authenticate\n\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\n\ndef _shared_login(request):\n \"\"\"\n Handle the shared login between website and webclient.\n\n \"\"\"\n csession = request.session\n account = request.user\n # these can have 3 values:\n # None - previously unused (auto-login)\n # False - actively logged out (don't auto-login)\n # <uid> - logged in User/Account id\n website_uid = csession.get(\"website_authenticated_uid\", None)\n webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n\n # check if user has authenticated to website\n if not csession.session_key:\n # this is necessary to build the sessid key\n csession.save()\n\n if webclient_uid:\n # The webclient has previously registered a login to this browser_session\n if not account.is_authenticated() and not website_uid:\n try:\n account = AccountDB.objects.get(id=webclient_uid)\n except AccountDB.DoesNotExist:\n # this can happen e.g. 
for guest accounts or deletions\n csession[\"website_authenticated_uid\"] = False\n csession[\"webclient_authenticated_uid\"] = False\n return\n try:\n # calls our custom authenticate in web/utils/backends.py\n account = authenticate(autologin=account)\n login(request, account)\n csession[\"website_authenticated_uid\"] = webclient_uid\n except AttributeError:\n logger.log_trace()\n\n\ndef webclient(request):\n \"\"\"\n Webclient page template loading.\n\n \"\"\"\n # handle webclient-website shared login\n _shared_login(request)\n\n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n\n return render(request, 'webclient.html', pagevars)\n", "path": "evennia/web/webclient/views.py"}]}
1,852
675
gh_patches_debug_18069
rasdani/github-patches
git_diff
microsoft__torchgeo-1647
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Checkpoint saving not working as expected ### Description After migrating to release 0.5.0 noticed that checkpoint saving is not working as expected. ## description tried different configuration e.g., `checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)` for example when running 20-30 epochs for training a model. after training was completed could not find the ckpt file. what was found was a single ckpt file of the first epoch only, in a wrong directory. ## severance the bug is very limiting. for example, after hours of training a model, there is no way to load the model from a checkpoint to run inference. the single shot to run inference was during the same run. ## expected behavior using a given configuration expected to see: - checkpoint files saved every number of epoch - the last epoch checkpoint file - the checkpoints should have been saved to the given directory ## observed behavior - after training several epochs only the first was saved. - the single checkpoint were saved to another directory under the logger output ## initial investigation 1. checkpoint callback created and training fit called 2. later, see image and call stack: seems like c'tor called again with save_last=None ![image](https://github.com/microsoft/torchgeo/assets/11072478/667b6cc1-8b7b-4ede-a61d-c365e851ab59) 3. when saving later supposed to happen, the save_last is None: ![image](https://github.com/microsoft/torchgeo/assets/11072478/2dd68eff-3c43-4ccc-8ce7-4a463044000b) 4. last checkpoint saving is skipped ### Steps to reproduce 1. create a checkpoint callback and use different checkpoints saving parameters e.g., checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1) 2. call trainer fit and run several epochs 3. check expected results: - saving location as expected e.g., under C:\foo - check last epoch checkpoint saved - must have last.ckpt - check how many checkpoints were saved e.g., every 2 etc ### Version torchgeo version 0.5.0, lightning version 2.0.9 </issue> <code> [start of torchgeo/trainers/base.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 """Base classes for all :mod:`torchgeo` trainers.""" 5 6 from abc import ABC, abstractmethod 7 from typing import Any 8 9 import lightning 10 from lightning.pytorch import LightningModule 11 from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint 12 from torch.optim import AdamW 13 from torch.optim.lr_scheduler import ReduceLROnPlateau 14 15 16 class BaseTask(LightningModule, ABC): 17 """Abstract base class for all TorchGeo trainers. 18 19 .. versionadded:: 0.5 20 """ 21 22 #: Model to train. 23 model: Any 24 25 #: Performance metric to monitor in learning rate scheduler and callbacks. 26 monitor = "val_loss" 27 28 #: Whether the goal is to minimize or maximize the performance metric to monitor. 29 mode = "min" 30 31 def __init__(self) -> None: 32 """Initialize a new BaseTask instance.""" 33 super().__init__() 34 self.save_hyperparameters() 35 self.configure_losses() 36 self.configure_metrics() 37 self.configure_models() 38 39 def configure_callbacks(self) -> list[Callback]: 40 """Initialize model-specific callbacks. 41 42 Returns: 43 List of callbacks to apply. 
44 """ 45 return [ 46 ModelCheckpoint(monitor=self.monitor, mode=self.mode), 47 EarlyStopping(monitor=self.monitor, mode=self.mode), 48 ] 49 50 def configure_losses(self) -> None: 51 """Initialize the loss criterion.""" 52 53 def configure_metrics(self) -> None: 54 """Initialize the performance metrics.""" 55 56 @abstractmethod 57 def configure_models(self) -> None: 58 """Initialize the model.""" 59 60 def configure_optimizers( 61 self, 62 ) -> "lightning.pytorch.utilities.types.OptimizerLRSchedulerConfig": 63 """Initialize the optimizer and learning rate scheduler. 64 65 Returns: 66 Optimizer and learning rate scheduler. 67 """ 68 optimizer = AdamW(self.parameters(), lr=self.hparams["lr"]) 69 scheduler = ReduceLROnPlateau(optimizer, patience=self.hparams["patience"]) 70 return { 71 "optimizer": optimizer, 72 "lr_scheduler": {"scheduler": scheduler, "monitor": self.monitor}, 73 } 74 75 def forward(self, *args: Any, **kwargs: Any) -> Any: 76 """Forward pass of the model. 77 78 Args: 79 args: Arguments to pass to model. 80 kwargs: Keyword arguments to pass to model. 81 82 Returns: 83 Output of the model. 84 """ 85 return self.model(*args, **kwargs) 86 [end of torchgeo/trainers/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchgeo/trainers/base.py b/torchgeo/trainers/base.py --- a/torchgeo/trainers/base.py +++ b/torchgeo/trainers/base.py @@ -8,7 +8,6 @@ import lightning from lightning.pytorch import LightningModule -from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint from torch.optim import AdamW from torch.optim.lr_scheduler import ReduceLROnPlateau @@ -36,17 +35,6 @@ self.configure_metrics() self.configure_models() - def configure_callbacks(self) -> list[Callback]: - """Initialize model-specific callbacks. - - Returns: - List of callbacks to apply. - """ - return [ - ModelCheckpoint(monitor=self.monitor, mode=self.mode), - EarlyStopping(monitor=self.monitor, mode=self.mode), - ] - def configure_losses(self) -> None: """Initialize the loss criterion."""
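With the model-level callbacks removed, checkpointing is entirely in the user's hands again. Below is a sketch of the user-side setup described in the issue, written against the Lightning 2.x API; the commented-out `task`/`datamodule` names are placeholders, not verified TorchGeo classes.

```python
# User-side sketch (assumes lightning 2.x is installed; not TorchGeo source code).
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import ModelCheckpoint

checkpoint_callback = ModelCheckpoint(
    monitor="val_loss",
    dirpath="checkpoints/",   # checkpoints land here, not under the logger output dir
    save_last=True,           # keeps last.ckpt for resuming or inference
    every_n_epochs=1,
    save_top_k=1,
)
trainer = Trainer(max_epochs=30, callbacks=[checkpoint_callback])
# trainer.fit(task, datamodule=datamodule)  # task/datamodule defined elsewhere
```

Because `BaseTask` no longer defines `configure_callbacks()`, the user-supplied `ModelCheckpoint` is the only checkpoint callback registered, so `dirpath`, `save_last` and the top-k policy are respected.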
{"golden_diff": "diff --git a/torchgeo/trainers/base.py b/torchgeo/trainers/base.py\n--- a/torchgeo/trainers/base.py\n+++ b/torchgeo/trainers/base.py\n@@ -8,7 +8,6 @@\n \n import lightning\n from lightning.pytorch import LightningModule\n-from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint\n from torch.optim import AdamW\n from torch.optim.lr_scheduler import ReduceLROnPlateau\n \n@@ -36,17 +35,6 @@\n self.configure_metrics()\n self.configure_models()\n \n- def configure_callbacks(self) -> list[Callback]:\n- \"\"\"Initialize model-specific callbacks.\n-\n- Returns:\n- List of callbacks to apply.\n- \"\"\"\n- return [\n- ModelCheckpoint(monitor=self.monitor, mode=self.mode),\n- EarlyStopping(monitor=self.monitor, mode=self.mode),\n- ]\n-\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\"\"\"\n", "issue": "Checkpoint saving not working as expected\n### Description\r\n\r\nAfter migrating to release 0.5.0 noticed that checkpoint saving is not working as expected.\r\n\r\n## description\r\ntried different configuration e.g., `checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)` for example when running 20-30 epochs for training a model.\r\nafter training was completed could not find the ckpt file. what was found was a single ckpt file of the first epoch only, in a wrong directory.\r\n\r\n## severance\r\nthe bug is very limiting. for example, after hours of training a model, there is no way to load the model from a checkpoint to run inference. the single shot to run inference was during the same run. \r\n\r\n## expected behavior\r\nusing a given configuration expected to see:\r\n- checkpoint files saved every number of epoch\r\n- the last epoch checkpoint file\r\n- the checkpoints should have been saved to the given directory\r\n\r\n## observed behavior\r\n- after training several epochs only the first was saved.\r\n- the single checkpoint were saved to another directory under the logger output\r\n\r\n## initial investigation\r\n1. checkpoint callback created and training fit called\r\n2. later, see image and call stack:\r\nseems like c'tor called again with save_last=None\r\n![image](https://github.com/microsoft/torchgeo/assets/11072478/667b6cc1-8b7b-4ede-a61d-c365e851ab59)\r\n\r\n3. when saving later supposed to happen, the save_last is None:\r\n![image](https://github.com/microsoft/torchgeo/assets/11072478/2dd68eff-3c43-4ccc-8ce7-4a463044000b)\r\n\r\n4. last checkpoint saving is skipped\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. create a checkpoint callback and use different checkpoints saving parameters e.g., checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)\r\n2. call trainer fit and run several epochs\r\n3. check expected results:\r\n- saving location as expected e.g., under C:\\foo \r\n- check last epoch checkpoint saved - must have last.ckpt\r\n- check how many checkpoints were saved e.g., every 2 etc\r\n\r\n### Version\r\n\r\ntorchgeo version 0.5.0, lightning version 2.0.9\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Base classes for all :mod:`torchgeo` trainers.\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Any\n\nimport lightning\nfrom lightning.pytorch import LightningModule\nfrom lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\nclass BaseTask(LightningModule, ABC):\n \"\"\"Abstract base class for all TorchGeo trainers.\n\n .. versionadded:: 0.5\n \"\"\"\n\n #: Model to train.\n model: Any\n\n #: Performance metric to monitor in learning rate scheduler and callbacks.\n monitor = \"val_loss\"\n\n #: Whether the goal is to minimize or maximize the performance metric to monitor.\n mode = \"min\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new BaseTask instance.\"\"\"\n super().__init__()\n self.save_hyperparameters()\n self.configure_losses()\n self.configure_metrics()\n self.configure_models()\n\n def configure_callbacks(self) -> list[Callback]:\n \"\"\"Initialize model-specific callbacks.\n\n Returns:\n List of callbacks to apply.\n \"\"\"\n return [\n ModelCheckpoint(monitor=self.monitor, mode=self.mode),\n EarlyStopping(monitor=self.monitor, mode=self.mode),\n ]\n\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\"\"\"\n\n def configure_metrics(self) -> None:\n \"\"\"Initialize the performance metrics.\"\"\"\n\n @abstractmethod\n def configure_models(self) -> None:\n \"\"\"Initialize the model.\"\"\"\n\n def configure_optimizers(\n self,\n ) -> \"lightning.pytorch.utilities.types.OptimizerLRSchedulerConfig\":\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n Optimizer and learning rate scheduler.\n \"\"\"\n optimizer = AdamW(self.parameters(), lr=self.hparams[\"lr\"])\n scheduler = ReduceLROnPlateau(optimizer, patience=self.hparams[\"patience\"])\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\"scheduler\": scheduler, \"monitor\": self.monitor},\n }\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n args: Arguments to pass to model.\n kwargs: Keyword arguments to pass to model.\n\n Returns:\n Output of the model.\n \"\"\"\n return self.model(*args, **kwargs)\n", "path": "torchgeo/trainers/base.py"}]}
1,772
207
gh_patches_debug_14240
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-87
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> @ character If I type @ in a chat I got this. (using https://github.com/leandrotoledo/python-telegram-bot/blob/master/examples/echobot.py) TypeError: b'hola @honguitobot' is not JSON serializable </issue> <code> [start of examples/echobot.py] 1 #!/usr/bin/env python 2 # 3 # Simple Bot to reply Telegram messages 4 # Copyright (C) 2015 Leandro Toledo de Souza <[email protected]> 5 # 6 # This program is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # This program is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with this program. If not, see [http://www.gnu.org/licenses/]. 18 19 20 import logging 21 import telegram 22 23 24 LAST_UPDATE_ID = None 25 26 27 def main(): 28 global LAST_UPDATE_ID 29 30 logging.basicConfig( 31 format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 32 33 # Telegram Bot Authorization Token 34 bot = telegram.Bot('TOKEN') 35 36 # This will be our global variable to keep the latest update_id when requesting 37 # for updates. It starts with the latest update_id if available. 38 try: 39 LAST_UPDATE_ID = bot.getUpdates()[-1].update_id 40 except IndexError: 41 LAST_UPDATE_ID = None 42 43 while True: 44 echo(bot) 45 46 47 def echo(bot): 48 global LAST_UPDATE_ID 49 50 # Request updates after the last updated_id 51 for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10): 52 # chat_id is required to reply any message 53 chat_id = update.message.chat_id 54 message = update.message.text.encode('utf-8') 55 56 if (message): 57 # Reply the message 58 bot.sendMessage(chat_id=chat_id, 59 text=message) 60 61 # Updates global offset to get the new updates 62 LAST_UPDATE_ID = update.update_id + 1 63 64 65 if __name__ == '__main__': 66 main() 67 [end of examples/echobot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/echobot.py b/examples/echobot.py --- a/examples/echobot.py +++ b/examples/echobot.py @@ -51,12 +51,12 @@ for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10): # chat_id is required to reply any message chat_id = update.message.chat_id - message = update.message.text.encode('utf-8') + reply_text = update.message.text - if (message): + if (reply_text): # Reply the message bot.sendMessage(chat_id=chat_id, - text=message) + text=reply_text) # Updates global offset to get the new updates LAST_UPDATE_ID = update.update_id + 1
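The traceback in the issue comes from handing bytes to the JSON layer; a tiny stand-alone demonstration (plain `json`, not the telegram library) shows the difference the patch makes:

```python
# Illustration only: the plain str serializes cleanly, the encoded bytes do not.
import json

text = "hola @honguitobot"
print(json.dumps({"text": text}))             # works with the original str
try:
    json.dumps({"text": text.encode("utf-8")})
except TypeError as exc:
    print(exc)                                # bytes are rejected as not JSON serializable
```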
{"golden_diff": "diff --git a/examples/echobot.py b/examples/echobot.py\n--- a/examples/echobot.py\n+++ b/examples/echobot.py\n@@ -51,12 +51,12 @@\n for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):\n # chat_id is required to reply any message\n chat_id = update.message.chat_id\n- message = update.message.text.encode('utf-8')\n+ reply_text = update.message.text\n \n- if (message):\n+ if (reply_text):\n # Reply the message\n bot.sendMessage(chat_id=chat_id,\n- text=message)\n+ text=reply_text)\n \n # Updates global offset to get the new updates\n LAST_UPDATE_ID = update.update_id + 1\n", "issue": "@ character\nIf I type @ in a chat I got this. (using https://github.com/leandrotoledo/python-telegram-bot/blob/master/examples/echobot.py)\n\nTypeError: b'hola @honguitobot' is not JSON serializable\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Simple Bot to reply Telegram messages\n# Copyright (C) 2015 Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\n\nimport logging\nimport telegram\n\n\nLAST_UPDATE_ID = None\n\n\ndef main():\n global LAST_UPDATE_ID\n\n logging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Telegram Bot Authorization Token\n bot = telegram.Bot('TOKEN')\n\n # This will be our global variable to keep the latest update_id when requesting\n # for updates. It starts with the latest update_id if available.\n try:\n LAST_UPDATE_ID = bot.getUpdates()[-1].update_id\n except IndexError:\n LAST_UPDATE_ID = None\n\n while True:\n echo(bot)\n\n\ndef echo(bot):\n global LAST_UPDATE_ID\n\n # Request updates after the last updated_id\n for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):\n # chat_id is required to reply any message\n chat_id = update.message.chat_id\n message = update.message.text.encode('utf-8')\n\n if (message):\n # Reply the message\n bot.sendMessage(chat_id=chat_id,\n text=message)\n\n # Updates global offset to get the new updates\n LAST_UPDATE_ID = update.update_id + 1\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/echobot.py"}]}
1,173
171
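The root cause in the record above is that `update.message.text.encode('utf-8')` turns the reply into `bytes`, and the request payload is later JSON-encoded, which rejects `bytes` values; echoing the plain `str` is exactly what the accepted patch does. A minimal, self-contained illustration of that serialization behaviour (hypothetical snippet, not taken from the python-telegram-bot codebase):

import json

reply_text = "hola @honguitobot"

# A plain str value serializes without trouble.
print(json.dumps({"chat_id": 123, "text": reply_text}))

# Encoding the text first yields bytes, which the json module rejects,
# mirroring the TypeError quoted in the issue.
try:
    json.dumps({"chat_id": 123, "text": reply_text.encode("utf-8")})
except TypeError as exc:
    print("serialization failed:", exc)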
gh_patches_debug_9136
rasdani/github-patches
git_diff
bokeh__bokeh-5537
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Replace `@$(...)` with `@$el.find(...)` Unnecessary alias. Often people forget that `@$(...) != $(...)`. </issue> <code> [start of sphinx/source/docs/user_guide/examples/extensions_putting_together.py] 1 from bokeh.core.properties import String, Instance 2 from bokeh.models import LayoutDOM, Slider 3 4 CODE =""" 5 import * as _ from "underscore" 6 import * as $ from "jquery" 7 8 import * as p from "core/properties" 9 import {LayoutDOM, LayoutDOMView} from "models/layouts/layout_dom" 10 11 export class CustomView extends LayoutDOMView 12 13 initialize: (options) -> 14 super(options) 15 16 @render() 17 18 # Set Backbone listener so that when the Bokeh slider has a change 19 # event, we can process the new data 20 @listenTo(@model.slider, 'change', () => @render()) 21 22 render: () -> 23 # Backbone Views create <div> elements by default, accessible as @$el. 24 # Many Bokeh views ignore this default <div>, and instead do things 25 # like draw to the HTML canvas. In this case though, we change the 26 # contents of the <div>, based on the current slider value. 27 @$el.html("<h1>#{ @model.text }: #{ @model.slider.value }</h1>") 28 @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' }) 29 30 export class Custom extends LayoutDOM 31 32 # If there is an associated view, this is boilerplate. 33 default_view: CustomView 34 35 # The ``type`` class attribute should generally match exactly the name 36 # of the corresponding Python class. 37 type: "Custom" 38 39 # The @define block adds corresponding "properties" to the JS model. These 40 # should basically line up 1-1 with the Python model class. Most property 41 # types have counterparts, e.g. bokeh.core.properties.String will be 42 # p.String in the JS implementation. Where the JS type system is not yet 43 # as rich, you can use p.Any as a "wildcard" property type. 44 @define { 45 text: [ p.String ] 46 slider: [ p.Any ] 47 } 48 """ 49 50 class Custom(LayoutDOM): 51 52 __implementation__ = CODE 53 54 text = String(default="Custom text") 55 56 slider = Instance(Slider) 57 58 from bokeh.io import show 59 60 from bokeh.layouts import column 61 from bokeh.models import Slider 62 63 slider = Slider(start=0, end=10, step=0.1, value=0, title="value") 64 65 custom = Custom(text="Special Slider Display", slider=slider) 66 67 layout = column(slider, custom) 68 69 show(layout) 70 [end of sphinx/source/docs/user_guide/examples/extensions_putting_together.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py --- a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py +++ b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py @@ -25,7 +25,7 @@ # like draw to the HTML canvas. In this case though, we change the # contents of the <div>, based on the current slider value. @$el.html("<h1>#{ @model.text }: #{ @model.slider.value }</h1>") - @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' }) + @$el.find('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' }) export class Custom extends LayoutDOM
{"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n--- a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n+++ b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n@@ -25,7 +25,7 @@\n # like draw to the HTML canvas. In this case though, we change the\n # contents of the <div>, based on the current slider value.\n @$el.html(\"<h1>#{ @model.text }: #{ @model.slider.value }</h1>\")\n- @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n+ @$el.find('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n \n export class Custom extends LayoutDOM\n", "issue": "Replace `@$(...)` with `@$el.find(...)`\nUnnecessary alias. Often people forget that `@$(...) != $(...)`.\n", "before_files": [{"content": "from bokeh.core.properties import String, Instance\nfrom bokeh.models import LayoutDOM, Slider\n\nCODE =\"\"\"\nimport * as _ from \"underscore\"\nimport * as $ from \"jquery\"\n\nimport * as p from \"core/properties\"\nimport {LayoutDOM, LayoutDOMView} from \"models/layouts/layout_dom\"\n\nexport class CustomView extends LayoutDOMView\n\n initialize: (options) ->\n super(options)\n\n @render()\n\n # Set Backbone listener so that when the Bokeh slider has a change\n # event, we can process the new data\n @listenTo(@model.slider, 'change', () => @render())\n\n render: () ->\n # Backbone Views create <div> elements by default, accessible as @$el.\n # Many Bokeh views ignore this default <div>, and instead do things\n # like draw to the HTML canvas. In this case though, we change the\n # contents of the <div>, based on the current slider value.\n @$el.html(\"<h1>#{ @model.text }: #{ @model.slider.value }</h1>\")\n @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n\nexport class Custom extends LayoutDOM\n\n # If there is an associated view, this is boilerplate.\n default_view: CustomView\n\n # The ``type`` class attribute should generally match exactly the name\n # of the corresponding Python class.\n type: \"Custom\"\n\n # The @define block adds corresponding \"properties\" to the JS model. These\n # should basically line up 1-1 with the Python model class. Most property\n # types have counterparts, e.g. bokeh.core.properties.String will be\n # p.String in the JS implementation. Where the JS type system is not yet\n # as rich, you can use p.Any as a \"wildcard\" property type.\n @define {\n text: [ p.String ]\n slider: [ p.Any ]\n }\n\"\"\"\n\nclass Custom(LayoutDOM):\n\n __implementation__ = CODE\n\n text = String(default=\"Custom text\")\n\n slider = Instance(Slider)\n\nfrom bokeh.io import show\n\nfrom bokeh.layouts import column\nfrom bokeh.models import Slider\n\nslider = Slider(start=0, end=10, step=0.1, value=0, title=\"value\")\n\ncustom = Custom(text=\"Special Slider Display\", slider=slider)\n\nlayout = column(slider, custom)\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/extensions_putting_together.py"}]}
1,261
208
gh_patches_debug_26190
rasdani/github-patches
git_diff
numpy__numpy-13320
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUILD: what is generate_mtrand_c.py ? xref #13163 Does anyone know what is ['numpy/random/mtrand/generate_mtrand_c.py'](https://github.com/numpy/numpy/blob/v1.16.2/numpy/random/mtrand/generate_mtrand_c.py) and why it is in ['MANIFEST.in'](https://github.com/numpy/numpy/blob/v1.16.2/MANIFEST.in#L11) ? Is it used by some downstream package? It does not seem to be used in [creating the wheels](https://github.com/MacPython/numpy-wheels) </issue> <code> [start of numpy/random/mtrand/generate_mtrand_c.py] 1 #!/usr/bin/env python 2 from __future__ import division, absolute_import, print_function 3 4 import sys 5 import re 6 import os 7 8 unused_internal_funcs = ['__Pyx_PrintItem', 9 '__Pyx_PrintNewline', 10 '__Pyx_ReRaise', 11 #'__Pyx_GetExcValue', 12 '__Pyx_ArgTypeTest', 13 '__Pyx_SetVtable', 14 '__Pyx_GetVtable', 15 '__Pyx_CreateClass'] 16 17 if __name__ == '__main__': 18 # Use cython here so that long docstrings are broken up. 19 # This is needed for some VC++ compilers. 20 os.system('cython mtrand.pyx') 21 mtrand_c = open('mtrand.c', 'r') 22 processed = open('mtrand_pp.c', 'w') 23 unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')' 24 uifpat = re.compile(r'static \w+ \*?'+unused_funcs_str+r'.*/\*proto\*/') 25 linepat = re.compile(r'/\* ".*/mtrand.pyx":') 26 for linenum, line in enumerate(mtrand_c): 27 m = re.match(r'^(\s+arrayObject\w*\s*=\s*[(])[(]PyObject\s*[*][)]', 28 line) 29 if m: 30 line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():]) 31 m = uifpat.match(line) 32 if m: 33 line = '' 34 m = re.search(unused_funcs_str, line) 35 if m: 36 print("%s was declared unused, but is used at line %d" % (m.group(), 37 linenum+1), file=sys.stderr) 38 line = linepat.sub(r'/* "mtrand.pyx":', line) 39 processed.write(line) 40 mtrand_c.close() 41 processed.close() 42 os.rename('mtrand_pp.c', 'mtrand.c') 43 [end of numpy/random/mtrand/generate_mtrand_c.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py deleted file mode 100644 --- a/numpy/random/mtrand/generate_mtrand_c.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, absolute_import, print_function - -import sys -import re -import os - -unused_internal_funcs = ['__Pyx_PrintItem', - '__Pyx_PrintNewline', - '__Pyx_ReRaise', - #'__Pyx_GetExcValue', - '__Pyx_ArgTypeTest', - '__Pyx_SetVtable', - '__Pyx_GetVtable', - '__Pyx_CreateClass'] - -if __name__ == '__main__': - # Use cython here so that long docstrings are broken up. - # This is needed for some VC++ compilers. - os.system('cython mtrand.pyx') - mtrand_c = open('mtrand.c', 'r') - processed = open('mtrand_pp.c', 'w') - unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')' - uifpat = re.compile(r'static \w+ \*?'+unused_funcs_str+r'.*/\*proto\*/') - linepat = re.compile(r'/\* ".*/mtrand.pyx":') - for linenum, line in enumerate(mtrand_c): - m = re.match(r'^(\s+arrayObject\w*\s*=\s*[(])[(]PyObject\s*[*][)]', - line) - if m: - line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():]) - m = uifpat.match(line) - if m: - line = '' - m = re.search(unused_funcs_str, line) - if m: - print("%s was declared unused, but is used at line %d" % (m.group(), - linenum+1), file=sys.stderr) - line = linepat.sub(r'/* "mtrand.pyx":', line) - processed.write(line) - mtrand_c.close() - processed.close() - os.rename('mtrand_pp.c', 'mtrand.c')
{"golden_diff": "diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py\ndeleted file mode 100644\n--- a/numpy/random/mtrand/generate_mtrand_c.py\n+++ /dev/null\n@@ -1,42 +0,0 @@\n-#!/usr/bin/env python\n-from __future__ import division, absolute_import, print_function\n-\n-import sys\n-import re\n-import os\n-\n-unused_internal_funcs = ['__Pyx_PrintItem',\n- '__Pyx_PrintNewline',\n- '__Pyx_ReRaise',\n- #'__Pyx_GetExcValue',\n- '__Pyx_ArgTypeTest',\n- '__Pyx_SetVtable',\n- '__Pyx_GetVtable',\n- '__Pyx_CreateClass']\n-\n-if __name__ == '__main__':\n- # Use cython here so that long docstrings are broken up.\n- # This is needed for some VC++ compilers.\n- os.system('cython mtrand.pyx')\n- mtrand_c = open('mtrand.c', 'r')\n- processed = open('mtrand_pp.c', 'w')\n- unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'\n- uifpat = re.compile(r'static \\w+ \\*?'+unused_funcs_str+r'.*/\\*proto\\*/')\n- linepat = re.compile(r'/\\* \".*/mtrand.pyx\":')\n- for linenum, line in enumerate(mtrand_c):\n- m = re.match(r'^(\\s+arrayObject\\w*\\s*=\\s*[(])[(]PyObject\\s*[*][)]',\n- line)\n- if m:\n- line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])\n- m = uifpat.match(line)\n- if m:\n- line = ''\n- m = re.search(unused_funcs_str, line)\n- if m:\n- print(\"%s was declared unused, but is used at line %d\" % (m.group(),\n- linenum+1), file=sys.stderr)\n- line = linepat.sub(r'/* \"mtrand.pyx\":', line)\n- processed.write(line)\n- mtrand_c.close()\n- processed.close()\n- os.rename('mtrand_pp.c', 'mtrand.c')\n", "issue": "BUILD: what is generate_mtrand_c.py ?\nxref #13163 \r\n\r\nDoes anyone know what is ['numpy/random/mtrand/generate_mtrand_c.py'](https://github.com/numpy/numpy/blob/v1.16.2/numpy/random/mtrand/generate_mtrand_c.py) and why it is in ['MANIFEST.in'](https://github.com/numpy/numpy/blob/v1.16.2/MANIFEST.in#L11) ? Is it used by some downstream package? It does not seem to be used in [creating the wheels](https://github.com/MacPython/numpy-wheels)\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\n\nimport sys\nimport re\nimport os\n\nunused_internal_funcs = ['__Pyx_PrintItem',\n '__Pyx_PrintNewline',\n '__Pyx_ReRaise',\n #'__Pyx_GetExcValue',\n '__Pyx_ArgTypeTest',\n '__Pyx_SetVtable',\n '__Pyx_GetVtable',\n '__Pyx_CreateClass']\n\nif __name__ == '__main__':\n # Use cython here so that long docstrings are broken up.\n # This is needed for some VC++ compilers.\n os.system('cython mtrand.pyx')\n mtrand_c = open('mtrand.c', 'r')\n processed = open('mtrand_pp.c', 'w')\n unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'\n uifpat = re.compile(r'static \\w+ \\*?'+unused_funcs_str+r'.*/\\*proto\\*/')\n linepat = re.compile(r'/\\* \".*/mtrand.pyx\":')\n for linenum, line in enumerate(mtrand_c):\n m = re.match(r'^(\\s+arrayObject\\w*\\s*=\\s*[(])[(]PyObject\\s*[*][)]',\n line)\n if m:\n line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])\n m = uifpat.match(line)\n if m:\n line = ''\n m = re.search(unused_funcs_str, line)\n if m:\n print(\"%s was declared unused, but is used at line %d\" % (m.group(),\n linenum+1), file=sys.stderr)\n line = linepat.sub(r'/* \"mtrand.pyx\":', line)\n processed.write(line)\n mtrand_c.close()\n processed.close()\n os.rename('mtrand_pp.c', 'mtrand.c')\n", "path": "numpy/random/mtrand/generate_mtrand_c.py"}]}
1,177
523
gh_patches_debug_36488
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2871
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider rubios is broken During the global build at 2021-05-26-14-42-23, spider **rubios** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/rubios.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson)) </issue> <code> [start of locations/spiders/rubios.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import re 4 5 from locations.items import GeojsonPointItem 6 7 8 class RubiosSpider(scrapy.Spider): 9 name = "rubios" 10 item_attributes = { 'brand': "Rubio's" } 11 allowed_domains = ['rubios.com'] 12 start_urls = ( 13 'https://www.rubios.com/sitemap.xml', 14 ) 15 16 def parse(self, response): 17 response.selector.remove_namespaces() 18 city_urls = response.xpath('//url/loc/text()').extract() 19 regex = re.compile(r'http\S+rubios.com/store-locations/\S+/\S+/\S+') 20 for path in city_urls: 21 if re.search(regex, path): 22 yield scrapy.Request( 23 path.strip(), 24 callback=self.parse_store, 25 ) 26 27 def parse_store(self, response): 28 29 properties = { 30 'name': response.xpath('//span[@itemprop="name"]/text()').extract_first(), 31 'ref': response.xpath('//span[@itemprop="name"]/text()').extract_first(), 32 'addr_full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(), 33 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(), 34 'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(), 35 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(), 36 'phone': response.xpath('//span[@itemprop="telephone"]/a/text()').extract_first(), 37 'website': response.url, 38 'opening_hours': "".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(), 39 'lon': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[0]), 40 'lat': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[1]), 41 } 42 43 yield GeojsonPointItem(**properties) 44 [end of locations/spiders/rubios.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/rubios.py b/locations/spiders/rubios.py --- a/locations/spiders/rubios.py +++ b/locations/spiders/rubios.py @@ -16,7 +16,7 @@ def parse(self, response): response.selector.remove_namespaces() city_urls = response.xpath('//url/loc/text()').extract() - regex = re.compile(r'http\S+rubios.com/store-locations/\S+/\S+/\S+') + regex = re.compile(r'http\S+rubios.com/restaurant-locations/\S+/\S+/\S+') for path in city_urls: if re.search(regex, path): yield scrapy.Request( @@ -27,17 +27,18 @@ def parse_store(self, response): properties = { - 'name': response.xpath('//span[@itemprop="name"]/text()').extract_first(), - 'ref': response.xpath('//span[@itemprop="name"]/text()').extract_first(), - 'addr_full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(), - 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(), - 'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(), - 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(), - 'phone': response.xpath('//span[@itemprop="telephone"]/a/text()').extract_first(), + 'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(), + 'ref': response.url, + 'addr_full': response.xpath('//meta[@property="og:street_address"]/@content').extract_first(), + 'city': response.xpath('//meta[@property="og:locality"]/@content').extract_first(), + 'state': response.xpath('//meta[@property="og:region"]/@content').extract_first(), + 'postcode': response.xpath('//meta[@property="og:postal_code"]/@content').extract_first(), + 'country': response.xpath('//meta[@property="og:country_name"]/@content').extract_first(), + 'phone': response.xpath('//@href[contains(.,"tel:")]').extract_first().replace('tel:', ''), 'website': response.url, - 'opening_hours': "".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(), - 'lon': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[0]), - 'lat': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[1]), + 'opening_hours': ''.join(response.css('.oh-wrapper ::text').extract()), + 'lon': response.xpath('//meta[@property="og:longitude"]/@content').extract_first(), + 'lat': response.xpath('//meta[@property="og:latitude"]/@content').extract_first(), } yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/rubios.py b/locations/spiders/rubios.py\n--- a/locations/spiders/rubios.py\n+++ b/locations/spiders/rubios.py\n@@ -16,7 +16,7 @@\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n- regex = re.compile(r'http\\S+rubios.com/store-locations/\\S+/\\S+/\\S+')\n+ regex = re.compile(r'http\\S+rubios.com/restaurant-locations/\\S+/\\S+/\\S+')\n for path in city_urls:\n if re.search(regex, path):\n yield scrapy.Request(\n@@ -27,17 +27,18 @@\n def parse_store(self, response):\n \n properties = {\n- 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n- 'ref': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n- 'addr_full': response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n- 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first(),\n- 'state': response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n- 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n- 'phone': response.xpath('//span[@itemprop=\"telephone\"]/a/text()').extract_first(),\n+ 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n+ 'ref': response.url,\n+ 'addr_full': response.xpath('//meta[@property=\"og:street_address\"]/@content').extract_first(),\n+ 'city': response.xpath('//meta[@property=\"og:locality\"]/@content').extract_first(),\n+ 'state': response.xpath('//meta[@property=\"og:region\"]/@content').extract_first(),\n+ 'postcode': response.xpath('//meta[@property=\"og:postal_code\"]/@content').extract_first(),\n+ 'country': response.xpath('//meta[@property=\"og:country_name\"]/@content').extract_first(),\n+ 'phone': response.xpath('//@href[contains(.,\"tel:\")]').extract_first().replace('tel:', ''),\n 'website': response.url,\n- 'opening_hours': \"\".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),\n- 'lon': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[0]),\n- 'lat': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[1]),\n+ 'opening_hours': ''.join(response.css('.oh-wrapper ::text').extract()),\n+ 'lon': response.xpath('//meta[@property=\"og:longitude\"]/@content').extract_first(),\n+ 'lat': response.xpath('//meta[@property=\"og:latitude\"]/@content').extract_first(),\n }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider rubios is broken\nDuring the global build at 2021-05-26-14-42-23, spider **rubios** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/rubios.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass RubiosSpider(scrapy.Spider):\n name = \"rubios\"\n item_attributes = { 'brand': \"Rubio's\" }\n allowed_domains = ['rubios.com']\n start_urls = (\n 'https://www.rubios.com/sitemap.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n regex = 
re.compile(r'http\\S+rubios.com/store-locations/\\S+/\\S+/\\S+')\n for path in city_urls:\n if re.search(regex, path):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n 'ref': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n 'addr_full': response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first(),\n 'state': response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'phone': response.xpath('//span[@itemprop=\"telephone\"]/a/text()').extract_first(),\n 'website': response.url,\n 'opening_hours': \"\".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),\n 'lon': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[0]),\n 'lat': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[1]),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/rubios.py"}]}
1,245
684
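The accepted patch above moves the spider from brittle `itemprop` spans and inline-script parsing to the page's Open Graph meta tags, which expose the address and coordinates as plain attributes. A rough sketch of that extraction pattern using `parsel`, the selector library behind Scrapy; the HTML fragment and values are illustrative assumptions, not content from the Rubio's site:

from parsel import Selector

html = """
<html><head>
<meta property="og:title" content="Example Coastal Grill" />
<meta property="og:street_address" content="123 Example Ave" />
<meta property="og:latitude" content="32.7157" />
<meta property="og:longitude" content="-117.1611" />
</head></html>
"""

sel = Selector(text=html)
store = {
    # Same og: properties the patched spider reads.
    "name": sel.xpath('//meta[@property="og:title"]/@content').get(),
    "addr_full": sel.xpath('//meta[@property="og:street_address"]/@content').get(),
    "lat": sel.xpath('//meta[@property="og:latitude"]/@content').get(),
    "lon": sel.xpath('//meta[@property="og:longitude"]/@content').get(),
}
print(store)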
gh_patches_debug_63976
rasdani/github-patches
git_diff
buildbot__buildbot-1214
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> New web plugin idea: a health indicator This ticket is a migrated Trac ticket [2966](http://trac.buildbot.net/ticket/2966) People contributed to the original ticket: `benoit.allard@...`, @tardyp, @rutsky, @unknown_contributor, @sa2ajj, @rutsky Ticket created on: `Oct 24 2014` Ticket last modified on: `Mar 19 2016` --- I like to extract as much useful indicator from my builds as possible (time, but also amount of warnings, and such ...) It would be cool to have a web plugin that could print the evolution of my indicators over time ! (Of course, I would have to configure which indicator I want to see plotted, maybe the kind of plot, and so on ...) --- _Comment from_: @sa2ajj _Date_: `Oct 24 2014` Could you please elaborate or provide a more specific example? I think it's related to the [[metrics support|http://docs.buildbot.net/latest/developer/metrics.html]], but without an example I can easily be wrong :) --- _Comment from_: @Ben _Date_: `Oct 24 2014` I was more aiming at [[Statistics|http://docs.buildbot.net/latest/developer/cls-buildsteps.html?highlight=statistics#buildbot.process.buildstep.[[BuildStep]].hasStatistic]], but I just realized that Note that statistics are not preserved after a build is complete. So metrics is probably where we want to interface with the master. I used to abuse Properties for that purpose ... --- _Comment from_: @tardyp _Date_: `Oct 24 2014` Buildbot plugin system is really made for enabling such dashboards. A web ui plugin is not technically restricted to creating a bunch of js file, it could also create a twisted service. For me having the JS only use existing data api to query the data will be very inefficient. I think we could easily create a service, like a status service that registers to a bunch of mq events, and create statistics based on that. I also had in mind that plugins could have some table in the DB they could use to store the data, or maybe use another db configuration with all schema + migration stuff separated. --- _Comment from_: @tardyp _Date_: `Oct 26 2014` on IRC, sa2ajj talked about http://graphite.readthedocs.org/ He told us, he actually uses it at work, and has integration working with eigth. Looking at the documentation, the first reaction is how to integrate this with multimaster, as graphite has its own db called whisper. I haven't look too much deeply, but I think this is still feasible as a external tool. Probably this would be much cheaper than making our own metrics system inside buildbot. An external graphite server could be setup, and watch for the (TBD) mq server. As there are messages for nearly every kind of activity that happens in buildbot, this is a good mean of making solid analysis of what is going on. Of course, this solution would not be fully integrated, as probably the UI would be external, but anyway, I think this is a possible cheap path. @sa2ajj do you think it is possible? How would you estimate the cost of integration? --- _Comment from_: @Ben _Date_: `Oct 26 2014` There are a lot of [[interfaces|http://graphite.readthedocs.org/en/latest/tools.html#visualization]], as well as data collectors / forwarders (same page, a bit higher) available for graphite. It looks like some of them are js-only ! Still way to go for a www-plugin ! --- _Comment from_: @sa2ajj _Date_: `Oct 27 2014` What I said was that we indeed use graphite, but I did not say we use it with Buildbot. 
I have an oldish branch where I tried to publish metrics to graphite. However I stopped working on that when I realised that it's not very straightforward to implement it to support multi-master case. If there's an interest, I can revive the branch or, at least, publish what I have (after rebasing the latest master) so other could comment in what direction my thinking worked. --- _Comment from_: @unknown_contributor _Date_: `Mar 19 2016` +1 for this feature. I suggest to merge the statistic API with the [[stats module|https://github.com/buildbot/buildbot/tree/master/master/buildbot/statistics]] developed last year by my GSOC student. And add a default backend that store a subset of these stat in the main database that would enable the development of heath/stats visualization modules directly installed by default (using Highchart js lib for example) </issue> <code> [start of pkg/setup.py] 1 #!/usr/bin/env python 2 # 3 # This file is part of Buildbot. Buildbot is free software: you can 4 # redistribute it and/or modify it under the terms of the GNU General Public 5 # License as published by the Free Software Foundation, version 2. 6 # 7 # This program is distributed in the hope that it will be useful, but WITHOUT 8 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 9 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 10 # details. 11 # 12 # You should have received a copy of the GNU General Public License along with 13 # this program; if not, write to the Free Software Foundation, Inc., 51 14 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 15 # 16 # Copyright Buildbot Team Members 17 18 from setuptools import setup 19 import buildbot_pkg 20 21 setup( 22 name='buildbot_pkg', 23 version=buildbot_pkg.getVersion("."), 24 description='Buildbot packaging tools', 25 author=u'Pierre Tardy', 26 author_email=u'[email protected]', 27 url='http://buildbot.net/', 28 license='GNU GPL', 29 py_modules=['buildbot_pkg'], 30 ) 31 [end of pkg/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pkg/setup.py b/pkg/setup.py --- a/pkg/setup.py +++ b/pkg/setup.py @@ -19,7 +19,7 @@ import buildbot_pkg setup( - name='buildbot_pkg', + name='buildbot-pkg', version=buildbot_pkg.getVersion("."), description='Buildbot packaging tools', author=u'Pierre Tardy',
{"golden_diff": "diff --git a/pkg/setup.py b/pkg/setup.py\n--- a/pkg/setup.py\n+++ b/pkg/setup.py\n@@ -19,7 +19,7 @@\n import buildbot_pkg\n \n setup(\n- name='buildbot_pkg',\n+ name='buildbot-pkg',\n version=buildbot_pkg.getVersion(\".\"),\n description='Buildbot packaging tools',\n author=u'Pierre Tardy',\n", "issue": "New web plugin idea: a health indicator\nThis ticket is a migrated Trac ticket [2966](http://trac.buildbot.net/ticket/2966)\n\nPeople contributed to the original ticket: `benoit.allard@...`, @tardyp, @rutsky, @unknown_contributor, @sa2ajj, @rutsky\nTicket created on: `Oct 24 2014`\nTicket last modified on: `Mar 19 2016`\n\n---\n\nI like to extract as much useful indicator from my builds as possible (time, but also amount of warnings, and such ...)\n\nIt would be cool to have a web plugin that could print the evolution of my indicators over time ! (Of course, I would have to configure which indicator I want to see plotted, maybe the kind of plot, and so on ...)\n\n\n---\n\n_Comment from_: @sa2ajj\n_Date_: `Oct 24 2014`\n\nCould you please elaborate or provide a more specific example?\n\nI think it's related to the [[metrics support|http://docs.buildbot.net/latest/developer/metrics.html]], but without an example I can easily be wrong :)\n\n---\n_Comment from_: @Ben\n_Date_: `Oct 24 2014`\n\nI was more aiming at [[Statistics|http://docs.buildbot.net/latest/developer/cls-buildsteps.html?highlight=statistics#buildbot.process.buildstep.[[BuildStep]].hasStatistic]], but I just realized that\n\n Note that statistics are not preserved after a build is complete.\n\nSo metrics is probably where we want to interface with the master.\n\nI used to abuse Properties for that purpose ...\n\n---\n_Comment from_: @tardyp\n_Date_: `Oct 24 2014`\n\nBuildbot plugin system is really made for enabling such dashboards.\n\nA web ui plugin is not technically restricted to creating a bunch of js file, it could also create a twisted service.\n\nFor me having the JS only use existing data api to query the data will be very inefficient. I think we could easily create a service, like a status service that registers to a bunch of mq events, and create statistics based on that.\n\nI also had in mind that plugins could have some table in the DB they could use to store the data, or maybe use another db configuration with all schema + migration stuff separated.\n\n\n---\n_Comment from_: @tardyp\n_Date_: `Oct 26 2014`\n\non IRC, sa2ajj talked about http://graphite.readthedocs.org/\n\nHe told us, he actually uses it at work, and has integration working with eigth.\n\nLooking at the documentation, the first reaction is how to integrate this with multimaster, as graphite has its own db called whisper.\nI haven't look too much deeply, but I think this is still feasible as a external tool. Probably this would be much cheaper than making our own metrics system inside buildbot.\n\nAn external graphite server could be setup, and watch for the (TBD) mq server. As there are messages for nearly every kind of activity that happens in buildbot, this is a good mean of making solid analysis of what is going on. Of course, this solution would not be fully integrated, as probably the UI would be external, but anyway, I think this is a possible cheap path.\n\n@sa2ajj do you think it is possible? 
How would you estimate the cost of integration?\n\n\n\n\n---\n_Comment from_: @Ben\n_Date_: `Oct 26 2014`\n\nThere are a lot of [[interfaces|http://graphite.readthedocs.org/en/latest/tools.html#visualization]], as well as data collectors / forwarders (same page, a bit higher) available for graphite. It looks like some of them are js-only ! Still way to go for a www-plugin !\n\n---\n_Comment from_: @sa2ajj\n_Date_: `Oct 27 2014`\n\nWhat I said was that we indeed use graphite, but I did not say we use it with Buildbot.\n\nI have an oldish branch where I tried to publish metrics to graphite. However I stopped working on that when I realised that it's not very straightforward to implement it to support multi-master case.\n\nIf there's an interest, I can revive the branch or, at least, publish what I have (after rebasing the latest master) so other could comment in what direction my thinking worked.\n\n---\n_Comment from_: @unknown_contributor\n_Date_: `Mar 19 2016`\n\n+1 for this feature. I suggest to merge the statistic API with the [[stats module|https://github.com/buildbot/buildbot/tree/master/master/buildbot/statistics]] developed last year by my GSOC student. And add a default backend that store a subset of these stat in the main database that would enable the development of heath/stats visualization modules directly installed by default (using Highchart js lib for example) \n\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom setuptools import setup\nimport buildbot_pkg\n\nsetup(\n name='buildbot_pkg',\n version=buildbot_pkg.getVersion(\".\"),\n description='Buildbot packaging tools',\n author=u'Pierre Tardy',\n author_email=u'[email protected]',\n url='http://buildbot.net/',\n license='GNU GPL',\n py_modules=['buildbot_pkg'],\n)\n", "path": "pkg/setup.py"}]}
1,881
87
gh_patches_debug_20490
rasdani/github-patches
git_diff
openvinotoolkit__datumaro-1284
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Automatic generation of specs.json by running specs.py is broken After merging https://github.com/openvinotoolkit/datumaro/pull/1268, `metadata` is newly introduced in `specs.json`, but the automated `specs.json` doesn't create `metadata`, and hence it breaks the lazy_load of importers. </issue> <code> [start of src/datumaro/plugins/specs.py] 1 # Copyright (C) 2023 Intel Corporation 2 # 3 # SPDX-License-Identifier: MIT 4 5 from pathlib import Path 6 7 from datumaro.components.lazy_plugin import get_extra_deps, get_lazy_plugin 8 from datumaro.util import parse_json_file 9 10 _SOURCE_PATH = Path(__file__).resolve() 11 _SOURCE_DIR = _SOURCE_PATH.parent 12 _SPECS_JSON_PATH = _SOURCE_DIR / "specs.json" 13 14 15 def get_lazy_plugins(): 16 return [ 17 plugin 18 for plugin in [ 19 get_lazy_plugin( 20 spec["import_path"], 21 spec["plugin_name"], 22 spec["plugin_type"], 23 spec.get("extra_deps", []), 24 spec.get("metadata", {}), 25 ) 26 for spec in parse_json_file(str(_SPECS_JSON_PATH)) 27 ] 28 if plugin is not None 29 ] 30 31 32 if __name__ == "__main__": 33 from datumaro.components.environment import Environment 34 from datumaro.util import dump_json_file 35 36 env = Environment(use_lazy_import=False) 37 plugin_specs = [] 38 39 def _enroll_to_plugin_specs(plugins, plugin_type): 40 global plugin_specs 41 42 for _, plugin in plugins.items(): 43 mod = plugin.__module__ 44 class_name = plugin.__name__ 45 plugin_name = plugin.NAME 46 plugin_specs += [ 47 { 48 "import_path": f"{mod}.{class_name}", 49 "plugin_name": plugin_name, 50 "plugin_type": plugin_type, 51 "extra_deps": get_extra_deps(plugin), 52 } 53 ] 54 55 _enroll_to_plugin_specs(env.extractors, "DatasetBase") 56 _enroll_to_plugin_specs(env.importers, "Importer") 57 _enroll_to_plugin_specs(env.launchers, "Launcher") 58 _enroll_to_plugin_specs(env.exporters, "Exporter") 59 _enroll_to_plugin_specs(env.generators, "DatasetGenerator") 60 _enroll_to_plugin_specs(env.transforms, "Transform") 61 _enroll_to_plugin_specs(env.validators, "Validator") 62 63 dump_json_file( 64 _SPECS_JSON_PATH, 65 sorted(plugin_specs, key=lambda spec: spec["import_path"]), 66 indent=True, 67 append_newline=True, 68 ) 69 [end of src/datumaro/plugins/specs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/datumaro/plugins/specs.py b/src/datumaro/plugins/specs.py --- a/src/datumaro/plugins/specs.py +++ b/src/datumaro/plugins/specs.py @@ -43,14 +43,18 @@ mod = plugin.__module__ class_name = plugin.__name__ plugin_name = plugin.NAME - plugin_specs += [ - { - "import_path": f"{mod}.{class_name}", - "plugin_name": plugin_name, - "plugin_type": plugin_type, - "extra_deps": get_extra_deps(plugin), - } - ] + extra_deps = get_extra_deps(plugin) + plugin_spec = { + "import_path": f"{mod}.{class_name}", + "plugin_name": plugin_name, + "plugin_type": plugin_type, + } + if extra_deps: + plugin_spec["extra_deps"] = extra_deps + # Setting metadata for importers + if hasattr(plugin, "get_file_extensions"): + plugin_spec["metadata"] = {"file_extensions": sorted(plugin.get_file_extensions())} + plugin_specs.append(plugin_spec) _enroll_to_plugin_specs(env.extractors, "DatasetBase") _enroll_to_plugin_specs(env.importers, "Importer")
{"golden_diff": "diff --git a/src/datumaro/plugins/specs.py b/src/datumaro/plugins/specs.py\n--- a/src/datumaro/plugins/specs.py\n+++ b/src/datumaro/plugins/specs.py\n@@ -43,14 +43,18 @@\n mod = plugin.__module__\n class_name = plugin.__name__\n plugin_name = plugin.NAME\n- plugin_specs += [\n- {\n- \"import_path\": f\"{mod}.{class_name}\",\n- \"plugin_name\": plugin_name,\n- \"plugin_type\": plugin_type,\n- \"extra_deps\": get_extra_deps(plugin),\n- }\n- ]\n+ extra_deps = get_extra_deps(plugin)\n+ plugin_spec = {\n+ \"import_path\": f\"{mod}.{class_name}\",\n+ \"plugin_name\": plugin_name,\n+ \"plugin_type\": plugin_type,\n+ }\n+ if extra_deps:\n+ plugin_spec[\"extra_deps\"] = extra_deps\n+ # Setting metadata for importers\n+ if hasattr(plugin, \"get_file_extensions\"):\n+ plugin_spec[\"metadata\"] = {\"file_extensions\": sorted(plugin.get_file_extensions())}\n+ plugin_specs.append(plugin_spec)\n \n _enroll_to_plugin_specs(env.extractors, \"DatasetBase\")\n _enroll_to_plugin_specs(env.importers, \"Importer\")\n", "issue": "Automatic generation of specs.json by running specs.py is broken\nAfter merging https://github.com/openvinotoolkit/datumaro/pull/1268, `metadata` is newly introduced in `specs.json`, but the automated `specs.json` doesn't create `metadata`, and hence it breaks the lazy_load of importers.\n", "before_files": [{"content": "# Copyright (C) 2023 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom pathlib import Path\n\nfrom datumaro.components.lazy_plugin import get_extra_deps, get_lazy_plugin\nfrom datumaro.util import parse_json_file\n\n_SOURCE_PATH = Path(__file__).resolve()\n_SOURCE_DIR = _SOURCE_PATH.parent\n_SPECS_JSON_PATH = _SOURCE_DIR / \"specs.json\"\n\n\ndef get_lazy_plugins():\n return [\n plugin\n for plugin in [\n get_lazy_plugin(\n spec[\"import_path\"],\n spec[\"plugin_name\"],\n spec[\"plugin_type\"],\n spec.get(\"extra_deps\", []),\n spec.get(\"metadata\", {}),\n )\n for spec in parse_json_file(str(_SPECS_JSON_PATH))\n ]\n if plugin is not None\n ]\n\n\nif __name__ == \"__main__\":\n from datumaro.components.environment import Environment\n from datumaro.util import dump_json_file\n\n env = Environment(use_lazy_import=False)\n plugin_specs = []\n\n def _enroll_to_plugin_specs(plugins, plugin_type):\n global plugin_specs\n\n for _, plugin in plugins.items():\n mod = plugin.__module__\n class_name = plugin.__name__\n plugin_name = plugin.NAME\n plugin_specs += [\n {\n \"import_path\": f\"{mod}.{class_name}\",\n \"plugin_name\": plugin_name,\n \"plugin_type\": plugin_type,\n \"extra_deps\": get_extra_deps(plugin),\n }\n ]\n\n _enroll_to_plugin_specs(env.extractors, \"DatasetBase\")\n _enroll_to_plugin_specs(env.importers, \"Importer\")\n _enroll_to_plugin_specs(env.launchers, \"Launcher\")\n _enroll_to_plugin_specs(env.exporters, \"Exporter\")\n _enroll_to_plugin_specs(env.generators, \"DatasetGenerator\")\n _enroll_to_plugin_specs(env.transforms, \"Transform\")\n _enroll_to_plugin_specs(env.validators, \"Validator\")\n\n dump_json_file(\n _SPECS_JSON_PATH,\n sorted(plugin_specs, key=lambda spec: spec[\"import_path\"]),\n indent=True,\n append_newline=True,\n )\n", "path": "src/datumaro/plugins/specs.py"}]}
1,194
283
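The patch above makes the spec generator emit the same optional keys the lazy loader already reads: `extra_deps` only when non-empty, plus a `metadata` block with `file_extensions` for importers. A hand-written example of what one regenerated `specs.json` entry could then look like; the import path, plugin name, and extension are made-up placeholders, not real Datumaro plugins:

import json

plugin_spec = {
    "import_path": "datumaro.plugins.example_format.ExampleImporter",  # placeholder
    "plugin_name": "example",
    "plugin_type": "Importer",
    # Present only because the plugin defines get_file_extensions().
    "metadata": {"file_extensions": [".example"]},
}
print(json.dumps(plugin_spec, indent=2))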
gh_patches_debug_4520
rasdani/github-patches
git_diff
pytorch__vision-357
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading When loading ImageNet, I get the following debug messages `Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading` message called from https://github.com/pytorch/vision/blob/45dab0e4528333332f07ab1979e006857563cf99/torchvision/datasets/folder.py#L46 caused by https://github.com/python-pillow/Pillow/blob/7d8c0d9e39d5ed79ea6aa590b9cbff5b0d17248c/PIL/Image.py#L573 </issue> <code> [start of torchvision/datasets/folder.py] 1 import torch.utils.data as data 2 3 from PIL import Image 4 import os 5 import os.path 6 7 IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm'] 8 9 10 def is_image_file(filename): 11 """Checks if a file is an image. 12 13 Args: 14 filename (string): path to a file 15 16 Returns: 17 bool: True if the filename ends with a known image extension 18 """ 19 filename_lower = filename.lower() 20 return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS) 21 22 23 def find_classes(dir): 24 classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] 25 classes.sort() 26 class_to_idx = {classes[i]: i for i in range(len(classes))} 27 return classes, class_to_idx 28 29 30 def make_dataset(dir, class_to_idx): 31 images = [] 32 dir = os.path.expanduser(dir) 33 for target in sorted(os.listdir(dir)): 34 d = os.path.join(dir, target) 35 if not os.path.isdir(d): 36 continue 37 38 for root, _, fnames in sorted(os.walk(d)): 39 for fname in sorted(fnames): 40 if is_image_file(fname): 41 path = os.path.join(root, fname) 42 item = (path, class_to_idx[target]) 43 images.append(item) 44 45 return images 46 47 48 def pil_loader(path): 49 # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) 50 with open(path, 'rb') as f: 51 with Image.open(f) as img: 52 return img.convert('RGB') 53 54 55 def accimage_loader(path): 56 import accimage 57 try: 58 return accimage.Image(path) 59 except IOError: 60 # Potentially a decoding problem, fall back to PIL.Image 61 return pil_loader(path) 62 63 64 def default_loader(path): 65 from torchvision import get_image_backend 66 if get_image_backend() == 'accimage': 67 return accimage_loader(path) 68 else: 69 return pil_loader(path) 70 71 72 class ImageFolder(data.Dataset): 73 """A generic data loader where the images are arranged in this way: :: 74 75 root/dog/xxx.png 76 root/dog/xxy.png 77 root/dog/xxz.png 78 79 root/cat/123.png 80 root/cat/nsdf3.png 81 root/cat/asd932_.png 82 83 Args: 84 root (string): Root directory path. 85 transform (callable, optional): A function/transform that takes in an PIL image 86 and returns a transformed version. E.g, ``transforms.RandomCrop`` 87 target_transform (callable, optional): A function/transform that takes in the 88 target and transforms it. 89 loader (callable, optional): A function to load an image given its path. 90 91 Attributes: 92 classes (list): List of the class names. 93 class_to_idx (dict): Dict with items (class_name, class_index). 
94 imgs (list): List of (image path, class_index) tuples 95 """ 96 97 def __init__(self, root, transform=None, target_transform=None, 98 loader=default_loader): 99 classes, class_to_idx = find_classes(root) 100 imgs = make_dataset(root, class_to_idx) 101 if len(imgs) == 0: 102 raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n" 103 "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) 104 105 self.root = root 106 self.imgs = imgs 107 self.classes = classes 108 self.class_to_idx = class_to_idx 109 self.transform = transform 110 self.target_transform = target_transform 111 self.loader = loader 112 113 def __getitem__(self, index): 114 """ 115 Args: 116 index (int): Index 117 118 Returns: 119 tuple: (image, target) where target is class_index of the target class. 120 """ 121 path, target = self.imgs[index] 122 img = self.loader(path) 123 if self.transform is not None: 124 img = self.transform(img) 125 if self.target_transform is not None: 126 target = self.target_transform(target) 127 128 return img, target 129 130 def __len__(self): 131 return len(self.imgs) 132 [end of torchvision/datasets/folder.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py --- a/torchvision/datasets/folder.py +++ b/torchvision/datasets/folder.py @@ -48,8 +48,8 @@ def pil_loader(path): # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) with open(path, 'rb') as f: - with Image.open(f) as img: - return img.convert('RGB') + img = Image.open(f) + return img.convert('RGB') def accimage_loader(path):
{"golden_diff": "diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py\n--- a/torchvision/datasets/folder.py\n+++ b/torchvision/datasets/folder.py\n@@ -48,8 +48,8 @@\n def pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n- with Image.open(f) as img:\n- return img.convert('RGB')\n+ img = Image.open(f)\n+ return img.convert('RGB')\n \n \n def accimage_loader(path):\n", "issue": "Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading\nWhen loading ImageNet, I get the following debug messages\r\n`Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading`\r\nmessage called from https://github.com/pytorch/vision/blob/45dab0e4528333332f07ab1979e006857563cf99/torchvision/datasets/folder.py#L46\r\n\r\ncaused by\r\nhttps://github.com/python-pillow/Pillow/blob/7d8c0d9e39d5ed79ea6aa590b9cbff5b0d17248c/PIL/Image.py#L573\n", "before_files": [{"content": "import torch.utils.data as data\n\nfrom PIL import Image\nimport os\nimport os.path\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an image.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)\n\n\ndef find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\ndef make_dataset(dir, class_to_idx):\n images = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n images.append(item)\n\n return images\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass ImageFolder(data.Dataset):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader):\n classes, class_to_idx = find_classes(root)\n imgs = make_dataset(root, class_to_idx)\n if len(imgs) == 0:\n raise(RuntimeError(\"Found 0 images in subfolders of: \" + root + \"\\n\"\n \"Supported image extensions are: \" + \",\".join(IMG_EXTENSIONS)))\n\n self.root = root\n self.imgs = imgs\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.transform = transform\n self.target_transform = target_transform\n self.loader = loader\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.imgs[index]\n img = self.loader(path)\n if self.transform is not None:\n img = self.transform(img)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n", "path": "torchvision/datasets/folder.py"}]}
1,913
140
gh_patches_debug_20191
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-267
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DB function to edit column names **Problem** <!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.--> The user might want to edit column names. **Proposed solution** <!-- A clear and concise description of your proposed solution or feature. --> We need a `db` function to allow the user to edit the name of a column. We need to check and fix foreign keys and other references to the column. </issue> <code> [start of db/columns.py] 1 from sqlalchemy import Column, Integer, ForeignKey 2 from db import constants 3 4 5 NULLABLE = "nullable" 6 PRIMARY_KEY = "primary_key" 7 TYPE = "type" 8 9 ID_TYPE = Integer 10 DEFAULT_COLUMNS = { 11 constants.ID: {TYPE: ID_TYPE, PRIMARY_KEY: True, NULLABLE: False} 12 } 13 14 15 class MathesarColumn(Column): 16 """ 17 This class constrains the possible arguments, enabling us to include 18 a copy method (which has been deprecated in upstream SQLAlchemy since 19 1.4). The idea is that we can faithfully copy the subset of the 20 column definition that we care about, and this class defines that 21 subset. 22 """ 23 def __init__( 24 self, 25 name, 26 sa_type, 27 foreign_keys=set(), 28 primary_key=False, 29 nullable=True, 30 ): 31 """ 32 Construct a new ``MathesarColumn`` object. 33 34 Required arguments: 35 name -- String giving the name of the column in the database. 36 sa_type -- the SQLAlchemy type of the column. 37 38 Optional keyword arguments: 39 primary_key -- Boolean giving whether the column is a primary key. 40 """ 41 super().__init__( 42 *foreign_keys, 43 name=name, 44 type_=sa_type, 45 primary_key=primary_key, 46 nullable=nullable, 47 ) 48 49 @classmethod 50 def from_column(cls, column): 51 """ 52 This alternate init method creates a new column (a copy) of the 53 given column. It respects only the properties in the __init__ 54 of the MathesarColumn. 55 """ 56 fkeys = {ForeignKey(fk.target_fullname) for fk in column.foreign_keys} 57 return cls( 58 column.name, 59 column.type, 60 foreign_keys=fkeys, 61 primary_key=column.primary_key, 62 nullable=column.nullable, 63 ) 64 65 @property 66 def is_default(self): 67 default_def = DEFAULT_COLUMNS.get(self.name, False) 68 return ( 69 default_def 70 and self.type.python_type == default_def[TYPE]().python_type 71 and self.primary_key == default_def.get(PRIMARY_KEY, False) 72 and self.nullable == default_def.get(NULLABLE, True) 73 ) 74 75 76 def get_default_mathesar_column_list(): 77 return [ 78 MathesarColumn( 79 c, 80 DEFAULT_COLUMNS[c][TYPE], 81 primary_key=DEFAULT_COLUMNS[c][PRIMARY_KEY] 82 ) 83 for c in DEFAULT_COLUMNS 84 ] 85 86 87 def init_mathesar_table_column_list_with_defaults(column_list): 88 default_columns = get_default_mathesar_column_list() 89 given_columns = [MathesarColumn.from_column(c) for c in column_list] 90 return default_columns + given_columns 91 [end of db/columns.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/db/columns.py b/db/columns.py --- a/db/columns.py +++ b/db/columns.py @@ -1,4 +1,4 @@ -from sqlalchemy import Column, Integer, ForeignKey +from sqlalchemy import Column, Integer, ForeignKey, Table, DDL, MetaData from db import constants @@ -88,3 +88,19 @@ default_columns = get_default_mathesar_column_list() given_columns = [MathesarColumn.from_column(c) for c in column_list] return default_columns + given_columns + + +def rename_column(schema, table_name, column_name, new_column_name, engine): + _preparer = engine.dialect.identifier_preparer + with engine.begin() as conn: + metadata = MetaData(bind=engine, schema=schema) + table = Table(table_name, metadata, schema=schema, autoload_with=engine) + column = table.columns[column_name] + prepared_table_name = _preparer.format_table(table) + prepared_column_name = _preparer.format_column(column) + prepared_new_column_name = _preparer.quote(new_column_name) + alter_stmt = f""" + ALTER TABLE {prepared_table_name} + RENAME {prepared_column_name} TO {prepared_new_column_name} + """ + conn.execute(DDL(alter_stmt))
{"golden_diff": "diff --git a/db/columns.py b/db/columns.py\n--- a/db/columns.py\n+++ b/db/columns.py\n@@ -1,4 +1,4 @@\n-from sqlalchemy import Column, Integer, ForeignKey\n+from sqlalchemy import Column, Integer, ForeignKey, Table, DDL, MetaData\n from db import constants\n \n \n@@ -88,3 +88,19 @@\n default_columns = get_default_mathesar_column_list()\n given_columns = [MathesarColumn.from_column(c) for c in column_list]\n return default_columns + given_columns\n+\n+\n+def rename_column(schema, table_name, column_name, new_column_name, engine):\n+ _preparer = engine.dialect.identifier_preparer\n+ with engine.begin() as conn:\n+ metadata = MetaData(bind=engine, schema=schema)\n+ table = Table(table_name, metadata, schema=schema, autoload_with=engine)\n+ column = table.columns[column_name]\n+ prepared_table_name = _preparer.format_table(table)\n+ prepared_column_name = _preparer.format_column(column)\n+ prepared_new_column_name = _preparer.quote(new_column_name)\n+ alter_stmt = f\"\"\"\n+ ALTER TABLE {prepared_table_name}\n+ RENAME {prepared_column_name} TO {prepared_new_column_name}\n+ \"\"\"\n+ conn.execute(DDL(alter_stmt))\n", "issue": "DB function to edit column names\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nThe user might want to edit column names.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nWe need a `db` function to allow the user to edit the name of a column. We need to check and fix foreign keys and other references to the column.\n", "before_files": [{"content": "from sqlalchemy import Column, Integer, ForeignKey\nfrom db import constants\n\n\nNULLABLE = \"nullable\"\nPRIMARY_KEY = \"primary_key\"\nTYPE = \"type\"\n\nID_TYPE = Integer\nDEFAULT_COLUMNS = {\n constants.ID: {TYPE: ID_TYPE, PRIMARY_KEY: True, NULLABLE: False}\n}\n\n\nclass MathesarColumn(Column):\n \"\"\"\n This class constrains the possible arguments, enabling us to include\n a copy method (which has been deprecated in upstream SQLAlchemy since\n 1.4). The idea is that we can faithfully copy the subset of the\n column definition that we care about, and this class defines that\n subset.\n \"\"\"\n def __init__(\n self,\n name,\n sa_type,\n foreign_keys=set(),\n primary_key=False,\n nullable=True,\n ):\n \"\"\"\n Construct a new ``MathesarColumn`` object.\n\n Required arguments:\n name -- String giving the name of the column in the database.\n sa_type -- the SQLAlchemy type of the column.\n\n Optional keyword arguments:\n primary_key -- Boolean giving whether the column is a primary key.\n \"\"\"\n super().__init__(\n *foreign_keys,\n name=name,\n type_=sa_type,\n primary_key=primary_key,\n nullable=nullable,\n )\n\n @classmethod\n def from_column(cls, column):\n \"\"\"\n This alternate init method creates a new column (a copy) of the\n given column. 
It respects only the properties in the __init__\n of the MathesarColumn.\n \"\"\"\n fkeys = {ForeignKey(fk.target_fullname) for fk in column.foreign_keys}\n return cls(\n column.name,\n column.type,\n foreign_keys=fkeys,\n primary_key=column.primary_key,\n nullable=column.nullable,\n )\n\n @property\n def is_default(self):\n default_def = DEFAULT_COLUMNS.get(self.name, False)\n return (\n default_def\n and self.type.python_type == default_def[TYPE]().python_type\n and self.primary_key == default_def.get(PRIMARY_KEY, False)\n and self.nullable == default_def.get(NULLABLE, True)\n )\n\n\ndef get_default_mathesar_column_list():\n return [\n MathesarColumn(\n c,\n DEFAULT_COLUMNS[c][TYPE],\n primary_key=DEFAULT_COLUMNS[c][PRIMARY_KEY]\n )\n for c in DEFAULT_COLUMNS\n ]\n\n\ndef init_mathesar_table_column_list_with_defaults(column_list):\n default_columns = get_default_mathesar_column_list()\n given_columns = [MathesarColumn.from_column(c) for c in column_list]\n return default_columns + given_columns\n", "path": "db/columns.py"}]}
1,360
292
gh_patches_debug_5030
rasdani/github-patches
git_diff
geopandas__geopandas-1544
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUG: test_numerical_operations fails under pandas 1.1.0 [Failure](https://travis-ci.org/github/geopandas/geopandas/jobs/712675986#L2140) in `test_numerical_operations` (`idxmin` error type) -> pandas-dev/pandas#32749. Even though it is a known regression in pandas, we should handle it (at least skip tests for now). </issue> <code> [start of geopandas/_compat.py] 1 from distutils.version import LooseVersion 2 import importlib 3 import os 4 import warnings 5 6 import pandas as pd 7 import shapely 8 9 # ----------------------------------------------------------------------------- 10 # pandas compat 11 # ----------------------------------------------------------------------------- 12 13 PANDAS_GE_025 = str(pd.__version__) >= LooseVersion("0.25.0") 14 PANDAS_GE_10 = str(pd.__version__) >= LooseVersion("0.26.0.dev") 15 PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0.dev") 16 17 18 # ----------------------------------------------------------------------------- 19 # Shapely / PyGEOS compat 20 # ----------------------------------------------------------------------------- 21 22 23 SHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion("1.7.0") 24 25 HAS_PYGEOS = None 26 USE_PYGEOS = None 27 PYGEOS_SHAPELY_COMPAT = None 28 29 try: 30 import pygeos # noqa 31 32 HAS_PYGEOS = True 33 except ImportError: 34 HAS_PYGEOS = False 35 36 37 def set_use_pygeos(val=None): 38 """ 39 Set the global configuration on whether to use PyGEOS or not. 40 41 The default is use PyGEOS if it is installed. This can be overridden 42 with an environment variable USE_PYGEOS (this is only checked at 43 first import, cannot be changed during interactive session). 44 45 Alternatively, pass a value here to force a True/False value. 46 """ 47 global USE_PYGEOS 48 global PYGEOS_SHAPELY_COMPAT 49 50 if val is not None: 51 USE_PYGEOS = bool(val) 52 else: 53 if USE_PYGEOS is None: 54 55 USE_PYGEOS = HAS_PYGEOS 56 57 env_use_pygeos = os.getenv("USE_PYGEOS", None) 58 if env_use_pygeos is not None: 59 USE_PYGEOS = bool(int(env_use_pygeos)) 60 61 # validate the pygeos version 62 if USE_PYGEOS: 63 try: 64 import pygeos # noqa 65 66 # validate the pygeos version 67 if not str(pygeos.__version__) >= LooseVersion("0.6"): 68 raise ImportError( 69 "PyGEOS >= 0.6 is required, version {0} is installed".format( 70 pygeos.__version__ 71 ) 72 ) 73 74 # Check whether Shapely and PyGEOS use the same GEOS version. 75 # Based on PyGEOS from_shapely implementation. 76 77 from shapely.geos import geos_version_string as shapely_geos_version 78 from pygeos import geos_capi_version_string 79 80 # shapely has something like: "3.6.2-CAPI-1.10.2 4d2925d6" 81 # pygeos has something like: "3.6.2-CAPI-1.10.2" 82 if not shapely_geos_version.startswith(geos_capi_version_string): 83 warnings.warn( 84 "The Shapely GEOS version ({}) is incompatible with the GEOS " 85 "version PyGEOS was compiled with ({}). Conversions between both " 86 "will be slow.".format( 87 shapely_geos_version, geos_capi_version_string 88 ) 89 ) 90 PYGEOS_SHAPELY_COMPAT = False 91 else: 92 PYGEOS_SHAPELY_COMPAT = True 93 94 except ImportError: 95 raise ImportError( 96 "To use the PyGEOS speed-ups within GeoPandas, you need to install " 97 "PyGEOS: 'conda install pygeos' or 'pip install pygeos'" 98 ) 99 100 101 set_use_pygeos() 102 103 104 def import_optional_dependency(name: str, extra: str = ""): 105 """ 106 Import an optional dependency. 
107 108 Adapted from pandas.compat._optional::import_optional_dependency 109 110 Raises a formatted ImportError if the module is not present. 111 112 Parameters 113 ---------- 114 name : str 115 The module name. 116 extra : str 117 Additional text to include in the ImportError message. 118 Returns 119 ------- 120 module 121 """ 122 msg = """Missing optional dependency '{name}'. {extra} " 123 "Use pip or conda to install {name}.""".format( 124 name=name, extra=extra 125 ) 126 127 if not isinstance(name, str): 128 raise ValueError( 129 "Invalid module name: '{name}'; must be a string".format(name=name) 130 ) 131 132 try: 133 module = importlib.import_module(name) 134 135 except ImportError: 136 raise ImportError(msg) from None 137 138 return module 139 140 141 # ----------------------------------------------------------------------------- 142 # RTree compat 143 # ----------------------------------------------------------------------------- 144 145 HAS_RTREE = None 146 RTREE_GE_094 = False 147 try: 148 import rtree # noqa 149 150 HAS_RTREE = True 151 except ImportError: 152 HAS_RTREE = False 153 [end of geopandas/_compat.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geopandas/_compat.py b/geopandas/_compat.py --- a/geopandas/_compat.py +++ b/geopandas/_compat.py @@ -12,7 +12,7 @@ PANDAS_GE_025 = str(pd.__version__) >= LooseVersion("0.25.0") PANDAS_GE_10 = str(pd.__version__) >= LooseVersion("0.26.0.dev") -PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0.dev") +PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0") # -----------------------------------------------------------------------------
{"golden_diff": "diff --git a/geopandas/_compat.py b/geopandas/_compat.py\n--- a/geopandas/_compat.py\n+++ b/geopandas/_compat.py\n@@ -12,7 +12,7 @@\n \n PANDAS_GE_025 = str(pd.__version__) >= LooseVersion(\"0.25.0\")\n PANDAS_GE_10 = str(pd.__version__) >= LooseVersion(\"0.26.0.dev\")\n-PANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0.dev\")\n+PANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0\")\n \n \n # -----------------------------------------------------------------------------\n", "issue": "BUG: test_numerical_operations fails under pandas 1.1.0\n[Failure](https://travis-ci.org/github/geopandas/geopandas/jobs/712675986#L2140) in `test_numerical_operations` (`idxmin` error type) -> pandas-dev/pandas#32749. Even though it is a known regression in pandas, we should handle it (at least skip tests for now).\n", "before_files": [{"content": "from distutils.version import LooseVersion\nimport importlib\nimport os\nimport warnings\n\nimport pandas as pd\nimport shapely\n\n# -----------------------------------------------------------------------------\n# pandas compat\n# -----------------------------------------------------------------------------\n\nPANDAS_GE_025 = str(pd.__version__) >= LooseVersion(\"0.25.0\")\nPANDAS_GE_10 = str(pd.__version__) >= LooseVersion(\"0.26.0.dev\")\nPANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0.dev\")\n\n\n# -----------------------------------------------------------------------------\n# Shapely / PyGEOS compat\n# -----------------------------------------------------------------------------\n\n\nSHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion(\"1.7.0\")\n\nHAS_PYGEOS = None\nUSE_PYGEOS = None\nPYGEOS_SHAPELY_COMPAT = None\n\ntry:\n import pygeos # noqa\n\n HAS_PYGEOS = True\nexcept ImportError:\n HAS_PYGEOS = False\n\n\ndef set_use_pygeos(val=None):\n \"\"\"\n Set the global configuration on whether to use PyGEOS or not.\n\n The default is use PyGEOS if it is installed. This can be overridden\n with an environment variable USE_PYGEOS (this is only checked at\n first import, cannot be changed during interactive session).\n\n Alternatively, pass a value here to force a True/False value.\n \"\"\"\n global USE_PYGEOS\n global PYGEOS_SHAPELY_COMPAT\n\n if val is not None:\n USE_PYGEOS = bool(val)\n else:\n if USE_PYGEOS is None:\n\n USE_PYGEOS = HAS_PYGEOS\n\n env_use_pygeos = os.getenv(\"USE_PYGEOS\", None)\n if env_use_pygeos is not None:\n USE_PYGEOS = bool(int(env_use_pygeos))\n\n # validate the pygeos version\n if USE_PYGEOS:\n try:\n import pygeos # noqa\n\n # validate the pygeos version\n if not str(pygeos.__version__) >= LooseVersion(\"0.6\"):\n raise ImportError(\n \"PyGEOS >= 0.6 is required, version {0} is installed\".format(\n pygeos.__version__\n )\n )\n\n # Check whether Shapely and PyGEOS use the same GEOS version.\n # Based on PyGEOS from_shapely implementation.\n\n from shapely.geos import geos_version_string as shapely_geos_version\n from pygeos import geos_capi_version_string\n\n # shapely has something like: \"3.6.2-CAPI-1.10.2 4d2925d6\"\n # pygeos has something like: \"3.6.2-CAPI-1.10.2\"\n if not shapely_geos_version.startswith(geos_capi_version_string):\n warnings.warn(\n \"The Shapely GEOS version ({}) is incompatible with the GEOS \"\n \"version PyGEOS was compiled with ({}). 
Conversions between both \"\n \"will be slow.\".format(\n shapely_geos_version, geos_capi_version_string\n )\n )\n PYGEOS_SHAPELY_COMPAT = False\n else:\n PYGEOS_SHAPELY_COMPAT = True\n\n except ImportError:\n raise ImportError(\n \"To use the PyGEOS speed-ups within GeoPandas, you need to install \"\n \"PyGEOS: 'conda install pygeos' or 'pip install pygeos'\"\n )\n\n\nset_use_pygeos()\n\n\ndef import_optional_dependency(name: str, extra: str = \"\"):\n \"\"\"\n Import an optional dependency.\n\n Adapted from pandas.compat._optional::import_optional_dependency\n\n Raises a formatted ImportError if the module is not present.\n\n Parameters\n ----------\n name : str\n The module name.\n extra : str\n Additional text to include in the ImportError message.\n Returns\n -------\n module\n \"\"\"\n msg = \"\"\"Missing optional dependency '{name}'. {extra} \"\n \"Use pip or conda to install {name}.\"\"\".format(\n name=name, extra=extra\n )\n\n if not isinstance(name, str):\n raise ValueError(\n \"Invalid module name: '{name}'; must be a string\".format(name=name)\n )\n\n try:\n module = importlib.import_module(name)\n\n except ImportError:\n raise ImportError(msg) from None\n\n return module\n\n\n# -----------------------------------------------------------------------------\n# RTree compat\n# -----------------------------------------------------------------------------\n\nHAS_RTREE = None\nRTREE_GE_094 = False\ntry:\n import rtree # noqa\n\n HAS_RTREE = True\nexcept ImportError:\n HAS_RTREE = False\n", "path": "geopandas/_compat.py"}]}
2,035
151
gh_patches_debug_23203
rasdani/github-patches
git_diff
carpentries__amy-2339
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Server Error when editing community roles Reported by @ErinBecker at `/workshops/person/<id>/edit/#communityroles` and `/communityroles/role/<another id>/edit/` in production. Reproduced on test AMY: ``` TypeError at /communityroles/role/25/edit/ the JSON object must be str, bytes or bytearray, not list Error during template rendering In template /webapps/test-amy.carpentries.org/repo/amy/templates/bootstrap4/field.html, error at line 39 {% crispy_field field %} ``` Also encountered locally when creating a community role and immediately editing it: ``` Exception Value: | 'NoneType' object is not iterable /home/eli/amy/amy/communityroles/fields.py, line 20, in get_context ``` </issue> <code> [start of amy/communityroles/fields.py] 1 import json 2 3 from django import forms 4 from django.http import QueryDict 5 from django.utils.datastructures import MultiValueDict 6 7 8 class CustomKeysWidget(forms.TextInput): 9 template_name = "widgets/custom_keys_widget.html" 10 11 def __init__(self, *args, **kwargs): 12 self.subwidget_form = kwargs.pop("subwidget_form", forms.TextInput) 13 super().__init__(*args, **kwargs) 14 15 def apply_labels(self, labels: list[str]) -> None: 16 self.labels = labels[:] 17 18 def get_context(self, name: str, value: str, attrs: dict): 19 value_deserialized = json.loads(value) 20 value_deserialized_dict = dict(value_deserialized) 21 default_values = dict([(label, "") for label in self.labels]) 22 context_value = default_values | value_deserialized_dict 23 24 context = super().get_context(name, context_value, attrs) 25 final_attrs = context["widget"]["attrs"] 26 id_ = context["widget"]["attrs"].get("id") 27 28 subwidgets = [] 29 for index, (label, value) in enumerate(context_value.items()): 30 widget_attrs = final_attrs.copy() 31 if id_: 32 widget_attrs["id"] = "{id_}_{index}".format(id_=id_, index=index) 33 34 widget = self.subwidget_form() 35 subwidget_context = widget.get_context(name, value, widget_attrs)["widget"] 36 subwidgets.append(subwidget_context | {"label": label}) 37 38 context["widget"]["subwidgets"] = subwidgets 39 return context 40 41 def value_from_datadict( 42 self, data: QueryDict, files: MultiValueDict, name: str 43 ) -> list[tuple[str, str]]: 44 """Prepare structure stored in database. The structure is tied to 45 `CommunityRole.custom_keys` expected format: 46 [ 47 (label1, value1), 48 (label2, value2), 49 ... 50 ] 51 """ 52 try: 53 values = data.getlist(name) 54 except AttributeError: 55 values = data.get(name, []) 56 return list(zip(self.labels, values)) 57 58 def value_omitted_from_data( 59 self, data: QueryDict, files: MultiValueDict, name: str 60 ) -> bool: 61 return False 62 63 64 class CustomKeysJSONField(forms.JSONField): 65 def __init__(self, **kwargs): 66 kwargs.setdefault("widget", CustomKeysWidget) 67 super().__init__(**kwargs) 68 69 def apply_labels(self, labels: list[str]) -> None: 70 self.labels = labels[:] 71 self.widget.apply_labels(self.labels) 72 [end of amy/communityroles/fields.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/amy/communityroles/fields.py b/amy/communityroles/fields.py --- a/amy/communityroles/fields.py +++ b/amy/communityroles/fields.py @@ -1,9 +1,12 @@ import json +import logging from django import forms from django.http import QueryDict from django.utils.datastructures import MultiValueDict +logger = logging.getLogger("amy") + class CustomKeysWidget(forms.TextInput): template_name = "widgets/custom_keys_widget.html" @@ -17,7 +20,14 @@ def get_context(self, name: str, value: str, attrs: dict): value_deserialized = json.loads(value) - value_deserialized_dict = dict(value_deserialized) + try: + value_deserialized_dict = dict(value_deserialized) + except (ValueError, TypeError) as e: + logger.debug( + f"Failed to load custom key values {value_deserialized} to dict: {e}." + ) + logger.debug("Proceeding without custom key values...") + value_deserialized_dict = {} default_values = dict([(label, "") for label in self.labels]) context_value = default_values | value_deserialized_dict
{"golden_diff": "diff --git a/amy/communityroles/fields.py b/amy/communityroles/fields.py\n--- a/amy/communityroles/fields.py\n+++ b/amy/communityroles/fields.py\n@@ -1,9 +1,12 @@\n import json\n+import logging\n \n from django import forms\n from django.http import QueryDict\n from django.utils.datastructures import MultiValueDict\n \n+logger = logging.getLogger(\"amy\")\n+\n \n class CustomKeysWidget(forms.TextInput):\n template_name = \"widgets/custom_keys_widget.html\"\n@@ -17,7 +20,14 @@\n \n def get_context(self, name: str, value: str, attrs: dict):\n value_deserialized = json.loads(value)\n- value_deserialized_dict = dict(value_deserialized)\n+ try:\n+ value_deserialized_dict = dict(value_deserialized)\n+ except (ValueError, TypeError) as e:\n+ logger.debug(\n+ f\"Failed to load custom key values {value_deserialized} to dict: {e}.\"\n+ )\n+ logger.debug(\"Proceeding without custom key values...\")\n+ value_deserialized_dict = {}\n default_values = dict([(label, \"\") for label in self.labels])\n context_value = default_values | value_deserialized_dict\n", "issue": "Server Error when editing community roles\nReported by @ErinBecker at `/workshops/person/<id>/edit/#communityroles` and `/communityroles/role/<another id>/edit/` in production.\r\n\r\nReproduced on test AMY:\r\n```\r\nTypeError at /communityroles/role/25/edit/\r\nthe JSON object must be str, bytes or bytearray, not list\r\nError during template rendering\r\nIn template /webapps/test-amy.carpentries.org/repo/amy/templates/bootstrap4/field.html, error at line 39\r\n\r\n{% crispy_field field %}\r\n```\r\n\r\nAlso encountered locally when creating a community role and immediately editing it:\r\n```\r\nException Value: | 'NoneType' object is not iterable\r\n/home/eli/amy/amy/communityroles/fields.py, line 20, in get_context\r\n\r\n```\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.http import QueryDict\nfrom django.utils.datastructures import MultiValueDict\n\n\nclass CustomKeysWidget(forms.TextInput):\n template_name = \"widgets/custom_keys_widget.html\"\n\n def __init__(self, *args, **kwargs):\n self.subwidget_form = kwargs.pop(\"subwidget_form\", forms.TextInput)\n super().__init__(*args, **kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n\n def get_context(self, name: str, value: str, attrs: dict):\n value_deserialized = json.loads(value)\n value_deserialized_dict = dict(value_deserialized)\n default_values = dict([(label, \"\") for label in self.labels])\n context_value = default_values | value_deserialized_dict\n\n context = super().get_context(name, context_value, attrs)\n final_attrs = context[\"widget\"][\"attrs\"]\n id_ = context[\"widget\"][\"attrs\"].get(\"id\")\n\n subwidgets = []\n for index, (label, value) in enumerate(context_value.items()):\n widget_attrs = final_attrs.copy()\n if id_:\n widget_attrs[\"id\"] = \"{id_}_{index}\".format(id_=id_, index=index)\n\n widget = self.subwidget_form()\n subwidget_context = widget.get_context(name, value, widget_attrs)[\"widget\"]\n subwidgets.append(subwidget_context | {\"label\": label})\n\n context[\"widget\"][\"subwidgets\"] = subwidgets\n return context\n\n def value_from_datadict(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> list[tuple[str, str]]:\n \"\"\"Prepare structure stored in database. 
The structure is tied to\n `CommunityRole.custom_keys` expected format:\n [\n (label1, value1),\n (label2, value2),\n ...\n ]\n \"\"\"\n try:\n values = data.getlist(name)\n except AttributeError:\n values = data.get(name, [])\n return list(zip(self.labels, values))\n\n def value_omitted_from_data(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> bool:\n return False\n\n\nclass CustomKeysJSONField(forms.JSONField):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"widget\", CustomKeysWidget)\n super().__init__(**kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n self.widget.apply_labels(self.labels)\n", "path": "amy/communityroles/fields.py"}]}
1,393
267
gh_patches_debug_21536
rasdani/github-patches
git_diff
pymeasure__pymeasure-81
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Consider installing the tests Since you decided to embed the test suite inside the package instead of leaving it outside in a common `tests` folder (both of which are totally acceptable and covered by the `pytest` guidelines), why not install the tests with the package? It would give the following benefits: - Simplify the `setuptools` metadata, by replacing the explicit listing of the modules with a single call to `find_package`, - Easy testing of the installed package via `python -m pytest --pyargs pymeasure`. </issue> <code> [start of setup.py] 1 # 2 # This file is part of the PyMeasure package. 3 # 4 # Copyright (c) 2013-2017 PyMeasure Developers 5 # 6 # Permission is hereby granted, free of charge, to any person obtaining a copy 7 # of this software and associated documentation files (the "Software"), to deal 8 # in the Software without restriction, including without limitation the rights 9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 # copies of the Software, and to permit persons to whom the Software is 11 # furnished to do so, subject to the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be included in 14 # all copies or substantial portions of the Software. 15 # 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 # THE SOFTWARE. 
23 # 24 25 26 from setuptools import setup 27 28 setup( 29 name='PyMeasure', 30 version='0.4.3', 31 author='PyMeasure Developers', 32 packages=[ 33 'pymeasure', 'pymeasure.instruments', 34 'pymeasure.adapters', 'pymeasure.display', 35 'pymeasure.experiment', 36 'pymeasure.instruments.agilent', 37 'pymeasure.instruments.ami', 38 'pymeasure.instruments.anritsu', 39 'pymeasure.instruments.danfysik', 40 'pymeasure.instruments.fwbell', 41 'pymeasure.instruments.hp', 42 'pymeasure.instruments.keithley', 43 'pymeasure.instruments.lakeshore', 44 'pymeasure.instruments.newport', 45 'pymeasure.instruments.parker', 46 'pymeasure.instruments.signalrecovery', 47 'pymeasure.instruments.srs', 48 'pymeasure.instruments.thorlabs', 49 'pymeasure.instruments.tektronix', 50 'pymeasure.instruments.yokogawa', 51 ], 52 scripts=[], 53 url='https://github.com/ralph-group/pymeasure', 54 download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3', 55 license='MIT License', 56 description='Scientific measurement library for instruments, experiments, and live-plotting', 57 long_description=open('README.rst').read() + "\n\n" + open('CHANGES.txt').read(), 58 install_requires=[ 59 "Numpy >= 1.6.1", 60 "pandas >= 0.14", 61 "pyvisa >= 1.8", 62 "pyserial >= 2.7", 63 "pyqtgraph >= 0.9.10" 64 ], 65 classifiers=[ 66 "Development Status :: 4 - Beta", 67 "Intended Audience :: Science/Research", 68 "License :: OSI Approved :: MIT License", 69 "Operating System :: MacOS", 70 "Operating System :: Microsoft :: Windows", 71 "Operating System :: POSIX", 72 "Operating System :: Unix", 73 "Programming Language :: Python :: 3 :: Only", 74 "Topic :: Scientific/Engineering", 75 ], 76 keywords="measure instrument experiment control automate graph plot" 77 ) 78 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -23,32 +23,13 @@ # -from setuptools import setup +from setuptools import setup, find_packages setup( name='PyMeasure', version='0.4.3', author='PyMeasure Developers', - packages=[ - 'pymeasure', 'pymeasure.instruments', - 'pymeasure.adapters', 'pymeasure.display', - 'pymeasure.experiment', - 'pymeasure.instruments.agilent', - 'pymeasure.instruments.ami', - 'pymeasure.instruments.anritsu', - 'pymeasure.instruments.danfysik', - 'pymeasure.instruments.fwbell', - 'pymeasure.instruments.hp', - 'pymeasure.instruments.keithley', - 'pymeasure.instruments.lakeshore', - 'pymeasure.instruments.newport', - 'pymeasure.instruments.parker', - 'pymeasure.instruments.signalrecovery', - 'pymeasure.instruments.srs', - 'pymeasure.instruments.thorlabs', - 'pymeasure.instruments.tektronix', - 'pymeasure.instruments.yokogawa', - ], + packages=find_packages(), scripts=[], url='https://github.com/ralph-group/pymeasure', download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,32 +23,13 @@\n #\n \n \n-from setuptools import setup\n+from setuptools import setup, find_packages\n \n setup(\n name='PyMeasure',\n version='0.4.3',\n author='PyMeasure Developers',\n- packages=[\n- 'pymeasure', 'pymeasure.instruments',\n- 'pymeasure.adapters', 'pymeasure.display',\n- 'pymeasure.experiment',\n- 'pymeasure.instruments.agilent',\n- 'pymeasure.instruments.ami',\n- 'pymeasure.instruments.anritsu',\n- 'pymeasure.instruments.danfysik',\n- 'pymeasure.instruments.fwbell',\n- 'pymeasure.instruments.hp',\n- 'pymeasure.instruments.keithley',\n- 'pymeasure.instruments.lakeshore',\n- 'pymeasure.instruments.newport',\n- 'pymeasure.instruments.parker',\n- 'pymeasure.instruments.signalrecovery',\n- 'pymeasure.instruments.srs',\n- 'pymeasure.instruments.thorlabs',\n- 'pymeasure.instruments.tektronix',\n- 'pymeasure.instruments.yokogawa',\n- ],\n+ packages=find_packages(),\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',\n", "issue": "Consider installing the tests\nSince you decided to embed the test suite inside the package instead of leaving it outside in a common `tests` folder (both of which are totally acceptable and covered by the `pytest` guidelines), why not install the tests with the package? It would give the following benefits:\r\n\r\n- Simplify the `setuptools` metadata, by replacing the explicit listing of the modules with a single call to `find_package`,\r\n- Easy testing of the installed package via `python -m pytest --pyargs pymeasure`.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2017 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup\n\nsetup(\n name='PyMeasure',\n version='0.4.3',\n author='PyMeasure Developers',\n packages=[\n 'pymeasure', 'pymeasure.instruments',\n 'pymeasure.adapters', 'pymeasure.display',\n 'pymeasure.experiment',\n 'pymeasure.instruments.agilent',\n 'pymeasure.instruments.ami',\n 'pymeasure.instruments.anritsu',\n 'pymeasure.instruments.danfysik',\n 'pymeasure.instruments.fwbell',\n 'pymeasure.instruments.hp',\n 'pymeasure.instruments.keithley',\n 'pymeasure.instruments.lakeshore',\n 'pymeasure.instruments.newport',\n 'pymeasure.instruments.parker',\n 'pymeasure.instruments.signalrecovery',\n 'pymeasure.instruments.srs',\n 'pymeasure.instruments.thorlabs',\n 'pymeasure.instruments.tektronix',\n 'pymeasure.instruments.yokogawa',\n ],\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"Numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}]}
1,526
348
gh_patches_debug_22658
rasdani/github-patches
git_diff
DDMAL__CantusDB-942
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> List of Genres: alphabetization should be case-insensitive The Lists of Feasts (this includes the list of all feasts as well as only the temporale and only the sanctorale) are ordered by alphabetic order of feast name, except that capital letters are currently taking precedence; a capital "Z" comes before a lowercase "a" and that muddles the system somewhat. Behold: <img width="1119" alt="NewCantus temporale list" src="https://github.com/DDMAL/CantusDB/assets/83373378/2c4f7d2e-df3c-4a57-8c52-7acbfe96b099"> You can see that the alphabet thing is solid until Ad Processionem, after which the third letter becomes lowercase (Ad aquam benedictio) and the alphabet starts all over again. Maybe this is preferable, but OldCantus doesn't take capitalization into account, so I thought I'd mention it. This is the same list on OldCantus, for reference: <img width="1158" alt="OldCantus temporale list" src="https://github.com/DDMAL/CantusDB/assets/83373378/598ba519-a37a-478b-884f-a1ff6b07ee34"> </issue> <code> [start of django/cantusdb_project/main_app/views/feast.py] 1 from django.views.generic import DetailView, ListView 2 from main_app.models import Feast, Source 3 from extra_views import SearchableListMixin 4 5 # this categorization is not finalized yet 6 # the feastcode on old cantus requires cleaning 7 # for now we just leave this categorization as it is 8 TEMP_PREFIX = [ 9 "01", 10 "02", 11 "03", 12 "04", 13 "05", 14 "06", 15 "07", 16 "08", 17 "09", 18 "10", 19 "11", 20 "16", 21 "17", 22 ] 23 SANC_PREFIX = ["12", "13", "14", "15"] 24 25 26 class FeastDetailView(DetailView): 27 model = Feast 28 context_object_name = "feast" 29 template_name = "feast_detail.html" 30 31 def get_context_data(self, **kwargs): 32 context = super().get_context_data(**kwargs) 33 34 display_unpublished = self.request.user.is_authenticated 35 36 chants_in_feast = self.get_object().chant_set 37 if not display_unpublished: 38 chants_in_feast = chants_in_feast.filter(source__published=True) 39 40 cantus_ids = list( 41 chants_in_feast.values_list("cantus_id", flat=True).distinct() 42 ) 43 44 counts = [] 45 incipits = [] 46 genres = [] 47 48 for cantus_id in cantus_ids: 49 chants = chants_in_feast.filter(cantus_id=cantus_id) 50 count = chants.count() 51 incipit = chants.first().incipit 52 genre = chants.first().genre 53 54 counts.append(count) 55 incipits.append(incipit) 56 genres.append(genre) 57 58 zipped = zip(cantus_ids, incipits, genres, counts) 59 zipped = sorted(zipped, key=lambda t: t[3], reverse=True) 60 context["frequent_chants_zip"] = zipped 61 62 source_ids = list( 63 chants_in_feast.values_list("source__id", flat=True).distinct() 64 ) 65 sources = Source.objects.filter(id__in=source_ids) 66 counts = [chants_in_feast.filter(source=source).count() for source in sources] 67 68 zipped = zip(sources, counts) 69 zipped = sorted(zipped, key=lambda t: t[1], reverse=True) 70 context["sources_zip"] = zipped 71 72 return context 73 74 75 class FeastListView(SearchableListMixin, ListView): 76 """Searchable List view for Feast model 77 78 Accessed by /feasts/ 79 80 When passed a ``?q=<query>`` argument in the GET request, it will filter feasts 81 based on the fields defined in ``search_fields`` with the ``icontains`` lookup 82 83 The feasts can also be filtered by `date` (temp/sanc) and `month` and ordered by `sort_by`, 84 which are also passed as GET parameters 85 """ 86 87 model = Feast 88 search_fields = ["name", 
"description", "feast_code"] 89 paginate_by = 100 90 context_object_name = "feasts" 91 template_name = "feast_list.html" 92 93 def get_ordering(self): 94 ordering = self.request.GET.get("sort_by") 95 # feasts can be ordered by name or feast_code, 96 # default to ordering by name if given anything else 97 if ordering not in ["name", "feast_code"]: 98 ordering = "name" 99 return ordering 100 101 def get_queryset(self): 102 queryset = super().get_queryset() 103 date = self.request.GET.get("date") 104 month = self.request.GET.get("month") 105 # temp vs sanc categorization is not finalized yet, 106 # the feastcode needs to be updated by the cantus people 107 if date == "temp": 108 queryset = queryset.filter(prefix__in=TEMP_PREFIX) 109 elif date == "sanc": 110 queryset = queryset.filter(prefix__in=SANC_PREFIX) 111 112 if month and (int(month)) in range(1, 13): 113 month = int(month) 114 queryset = queryset.filter(month=month) 115 116 return queryset 117 [end of django/cantusdb_project/main_app/views/feast.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/views/feast.py b/django/cantusdb_project/main_app/views/feast.py --- a/django/cantusdb_project/main_app/views/feast.py +++ b/django/cantusdb_project/main_app/views/feast.py @@ -1,4 +1,5 @@ from django.views.generic import DetailView, ListView +from django.db.models.functions import Lower from main_app.models import Feast, Source from extra_views import SearchableListMixin @@ -90,13 +91,14 @@ context_object_name = "feasts" template_name = "feast_list.html" - def get_ordering(self): + def get_ordering(self) -> tuple: ordering = self.request.GET.get("sort_by") # feasts can be ordered by name or feast_code, # default to ordering by name if given anything else if ordering not in ["name", "feast_code"]: ordering = "name" - return ordering + # case insensitive ordering by name + return (Lower(ordering),) if ordering == "name" else (ordering,) def get_queryset(self): queryset = super().get_queryset()
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/feast.py b/django/cantusdb_project/main_app/views/feast.py\n--- a/django/cantusdb_project/main_app/views/feast.py\n+++ b/django/cantusdb_project/main_app/views/feast.py\n@@ -1,4 +1,5 @@\n from django.views.generic import DetailView, ListView\n+from django.db.models.functions import Lower\n from main_app.models import Feast, Source\n from extra_views import SearchableListMixin\n \n@@ -90,13 +91,14 @@\n context_object_name = \"feasts\"\n template_name = \"feast_list.html\"\n \n- def get_ordering(self):\n+ def get_ordering(self) -> tuple:\n ordering = self.request.GET.get(\"sort_by\")\n # feasts can be ordered by name or feast_code,\n # default to ordering by name if given anything else\n if ordering not in [\"name\", \"feast_code\"]:\n ordering = \"name\"\n- return ordering\n+ # case insensitive ordering by name\n+ return (Lower(ordering),) if ordering == \"name\" else (ordering,)\n \n def get_queryset(self):\n queryset = super().get_queryset()\n", "issue": "List of Genres: alphabetization should be case-insensitive\nThe Lists of Feasts (this includes the list of all feasts as well as only the temporale and only the sanctorale) are ordered by alphabetic order of feast name, except that capital letters are currently taking precedence; a capital \"Z\" comes before a lowercase \"a\" and that muddles the system somewhat. Behold:\r\n\r\n<img width=\"1119\" alt=\"NewCantus temporale list\" src=\"https://github.com/DDMAL/CantusDB/assets/83373378/2c4f7d2e-df3c-4a57-8c52-7acbfe96b099\">\r\n\r\nYou can see that the alphabet thing is solid until Ad Processionem, after which the third letter becomes lowercase (Ad aquam benedictio) and the alphabet starts all over again. Maybe this is preferable, but OldCantus doesn't take capitalization into account, so I thought I'd mention it.\r\n\r\nThis is the same list on OldCantus, for reference: \r\n\r\n<img width=\"1158\" alt=\"OldCantus temporale list\" src=\"https://github.com/DDMAL/CantusDB/assets/83373378/598ba519-a37a-478b-884f-a1ff6b07ee34\">\r\n\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView\nfrom main_app.models import Feast, Source\nfrom extra_views import SearchableListMixin\n\n# this categorization is not finalized yet\n# the feastcode on old cantus requires cleaning\n# for now we just leave this categorization as it is\nTEMP_PREFIX = [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"16\",\n \"17\",\n]\nSANC_PREFIX = [\"12\", \"13\", \"14\", \"15\"]\n\n\nclass FeastDetailView(DetailView):\n model = Feast\n context_object_name = \"feast\"\n template_name = \"feast_detail.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n display_unpublished = self.request.user.is_authenticated\n\n chants_in_feast = self.get_object().chant_set\n if not display_unpublished:\n chants_in_feast = chants_in_feast.filter(source__published=True)\n\n cantus_ids = list(\n chants_in_feast.values_list(\"cantus_id\", flat=True).distinct()\n )\n\n counts = []\n incipits = []\n genres = []\n\n for cantus_id in cantus_ids:\n chants = chants_in_feast.filter(cantus_id=cantus_id)\n count = chants.count()\n incipit = chants.first().incipit\n genre = chants.first().genre\n\n counts.append(count)\n incipits.append(incipit)\n genres.append(genre)\n\n zipped = zip(cantus_ids, incipits, genres, counts)\n zipped = sorted(zipped, key=lambda t: t[3], reverse=True)\n 
context[\"frequent_chants_zip\"] = zipped\n\n source_ids = list(\n chants_in_feast.values_list(\"source__id\", flat=True).distinct()\n )\n sources = Source.objects.filter(id__in=source_ids)\n counts = [chants_in_feast.filter(source=source).count() for source in sources]\n\n zipped = zip(sources, counts)\n zipped = sorted(zipped, key=lambda t: t[1], reverse=True)\n context[\"sources_zip\"] = zipped\n\n return context\n\n\nclass FeastListView(SearchableListMixin, ListView):\n \"\"\"Searchable List view for Feast model\n\n Accessed by /feasts/\n\n When passed a ``?q=<query>`` argument in the GET request, it will filter feasts\n based on the fields defined in ``search_fields`` with the ``icontains`` lookup\n\n The feasts can also be filtered by `date` (temp/sanc) and `month` and ordered by `sort_by`,\n which are also passed as GET parameters\n \"\"\"\n\n model = Feast\n search_fields = [\"name\", \"description\", \"feast_code\"]\n paginate_by = 100\n context_object_name = \"feasts\"\n template_name = \"feast_list.html\"\n\n def get_ordering(self):\n ordering = self.request.GET.get(\"sort_by\")\n # feasts can be ordered by name or feast_code,\n # default to ordering by name if given anything else\n if ordering not in [\"name\", \"feast_code\"]:\n ordering = \"name\"\n return ordering\n\n def get_queryset(self):\n queryset = super().get_queryset()\n date = self.request.GET.get(\"date\")\n month = self.request.GET.get(\"month\")\n # temp vs sanc categorization is not finalized yet,\n # the feastcode needs to be updated by the cantus people\n if date == \"temp\":\n queryset = queryset.filter(prefix__in=TEMP_PREFIX)\n elif date == \"sanc\":\n queryset = queryset.filter(prefix__in=SANC_PREFIX)\n\n if month and (int(month)) in range(1, 13):\n month = int(month)\n queryset = queryset.filter(month=month)\n\n return queryset\n", "path": "django/cantusdb_project/main_app/views/feast.py"}]}
2,003
272
gh_patches_debug_5544
rasdani/github-patches
git_diff
mozilla__pontoon-2777
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop contributor profile URL with email addresses We should drop support for accessing contributor profiles by using user email addresses in the URLs. This feature, while not used internally in the app, leaks otherwise hidden user email addressed on sites like on web.archive.org index. This is particularly problematic for users without any contributions. </issue> <code> [start of pontoon/contributors/urls.py] 1 from django.urls import path, register_converter 2 from django.urls.converters import StringConverter 3 from django.views.generic import RedirectView 4 5 from . import views 6 7 8 class EmailConverter(StringConverter): 9 regex = r"[\w.%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}" 10 11 12 class UsernameConverter(StringConverter): 13 regex = r"[\w.@+-]+" 14 15 16 register_converter(EmailConverter, "email") 17 register_converter(UsernameConverter, "username") 18 19 urlpatterns = [ 20 # Legacy: Redirect to /contributors/email 21 path( 22 "contributor/<email:email>/", 23 RedirectView.as_view(url="/contributors/%(email)s/", permanent=True), 24 ), 25 # List contributors 26 path( 27 "contributors/", 28 views.ContributorsView.as_view(), 29 name="pontoon.contributors", 30 ), 31 # Contributor profile by email 32 path( 33 "contributors/<email:email>/", 34 views.contributor_email, 35 name="pontoon.contributors.contributor.email", 36 ), 37 # Contributor profile by username 38 path( 39 "contributors/<username:username>/", 40 views.contributor_username, 41 name="pontoon.contributors.contributor.username", 42 ), 43 # Verify email address 44 path( 45 "verify-email-address/<str:token>/", 46 views.verify_email_address, 47 name="pontoon.contributors.verify.email", 48 ), 49 # Current user's profile 50 path("profile/", views.profile, name="pontoon.contributors.profile"), 51 # Current user's settings 52 path("settings/", views.settings, name="pontoon.contributors.settings"), 53 # Current user's notifications 54 path( 55 "notifications/", 56 views.notifications, 57 name="pontoon.contributors.notifications", 58 ), 59 # Mark current user's notifications as read 60 path( 61 "notifications/mark-all-as-read/", 62 views.mark_all_notifications_as_read, 63 name="pontoon.contributors.notifications.mark.all.as.read", 64 ), 65 # API: Toggle user profile attribute 66 path( 67 "api/v1/user/<username:username>/", 68 views.toggle_user_profile_attribute, 69 name="pontoon.contributors.toggle_user_profile_attribute", 70 ), 71 # AJAX: Save custom homepage 72 path( 73 "save-custom-homepage/", 74 views.save_custom_homepage, 75 name="pontoon.contributors.save_custom_homepage", 76 ), 77 # AJAX: Save preferred source locale 78 path( 79 "save-preferred-source-locale/", 80 views.save_preferred_source_locale, 81 name="pontoon.contributors.save_preferred_source_locale", 82 ), 83 # AJAX: Dismiss Add-On Promotion 84 path( 85 "dismiss-addon-promotion/", 86 views.dismiss_addon_promotion, 87 name="pontoon.contributors.dismiss_addon_promotion", 88 ), 89 # AJAX: Update contribution graph 90 path( 91 "update-contribution-graph/", 92 views.update_contribution_graph, 93 name="pontoon.contributors.update_contribution_graph", 94 ), 95 # AJAX: Update contribution timeline 96 path( 97 "update-contribution-timeline/", 98 views.update_contribution_timeline, 99 name="pontoon.contributors.update_contribution_timeline", 100 ), 101 ] 102 [end of pontoon/contributors/urls.py] </code> I need you to solve this issue by generating a single patch file that I 
can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pontoon/contributors/urls.py b/pontoon/contributors/urls.py --- a/pontoon/contributors/urls.py +++ b/pontoon/contributors/urls.py @@ -28,12 +28,6 @@ views.ContributorsView.as_view(), name="pontoon.contributors", ), - # Contributor profile by email - path( - "contributors/<email:email>/", - views.contributor_email, - name="pontoon.contributors.contributor.email", - ), # Contributor profile by username path( "contributors/<username:username>/",
{"golden_diff": "diff --git a/pontoon/contributors/urls.py b/pontoon/contributors/urls.py\n--- a/pontoon/contributors/urls.py\n+++ b/pontoon/contributors/urls.py\n@@ -28,12 +28,6 @@\n views.ContributorsView.as_view(),\n name=\"pontoon.contributors\",\n ),\n- # Contributor profile by email\n- path(\n- \"contributors/<email:email>/\",\n- views.contributor_email,\n- name=\"pontoon.contributors.contributor.email\",\n- ),\n # Contributor profile by username\n path(\n \"contributors/<username:username>/\",\n", "issue": "Drop contributor profile URL with email addresses\nWe should drop support for accessing contributor profiles by using user email addresses in the URLs. This feature, while not used internally in the app, leaks otherwise hidden user email addressed on sites like on web.archive.org index.\r\n\r\nThis is particularly problematic for users without any contributions.\r\n\n", "before_files": [{"content": "from django.urls import path, register_converter\nfrom django.urls.converters import StringConverter\nfrom django.views.generic import RedirectView\n\nfrom . import views\n\n\nclass EmailConverter(StringConverter):\n regex = r\"[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}\"\n\n\nclass UsernameConverter(StringConverter):\n regex = r\"[\\w.@+-]+\"\n\n\nregister_converter(EmailConverter, \"email\")\nregister_converter(UsernameConverter, \"username\")\n\nurlpatterns = [\n # Legacy: Redirect to /contributors/email\n path(\n \"contributor/<email:email>/\",\n RedirectView.as_view(url=\"/contributors/%(email)s/\", permanent=True),\n ),\n # List contributors\n path(\n \"contributors/\",\n views.ContributorsView.as_view(),\n name=\"pontoon.contributors\",\n ),\n # Contributor profile by email\n path(\n \"contributors/<email:email>/\",\n views.contributor_email,\n name=\"pontoon.contributors.contributor.email\",\n ),\n # Contributor profile by username\n path(\n \"contributors/<username:username>/\",\n views.contributor_username,\n name=\"pontoon.contributors.contributor.username\",\n ),\n # Verify email address\n path(\n \"verify-email-address/<str:token>/\",\n views.verify_email_address,\n name=\"pontoon.contributors.verify.email\",\n ),\n # Current user's profile\n path(\"profile/\", views.profile, name=\"pontoon.contributors.profile\"),\n # Current user's settings\n path(\"settings/\", views.settings, name=\"pontoon.contributors.settings\"),\n # Current user's notifications\n path(\n \"notifications/\",\n views.notifications,\n name=\"pontoon.contributors.notifications\",\n ),\n # Mark current user's notifications as read\n path(\n \"notifications/mark-all-as-read/\",\n views.mark_all_notifications_as_read,\n name=\"pontoon.contributors.notifications.mark.all.as.read\",\n ),\n # API: Toggle user profile attribute\n path(\n \"api/v1/user/<username:username>/\",\n views.toggle_user_profile_attribute,\n name=\"pontoon.contributors.toggle_user_profile_attribute\",\n ),\n # AJAX: Save custom homepage\n path(\n \"save-custom-homepage/\",\n views.save_custom_homepage,\n name=\"pontoon.contributors.save_custom_homepage\",\n ),\n # AJAX: Save preferred source locale\n path(\n \"save-preferred-source-locale/\",\n views.save_preferred_source_locale,\n name=\"pontoon.contributors.save_preferred_source_locale\",\n ),\n # AJAX: Dismiss Add-On Promotion\n path(\n \"dismiss-addon-promotion/\",\n views.dismiss_addon_promotion,\n name=\"pontoon.contributors.dismiss_addon_promotion\",\n ),\n # AJAX: Update contribution graph\n path(\n \"update-contribution-graph/\",\n views.update_contribution_graph,\n 
name=\"pontoon.contributors.update_contribution_graph\",\n ),\n # AJAX: Update contribution timeline\n path(\n \"update-contribution-timeline/\",\n views.update_contribution_timeline,\n name=\"pontoon.contributors.update_contribution_timeline\",\n ),\n]\n", "path": "pontoon/contributors/urls.py"}]}
1484
146
gh_patches_debug_1910
rasdani/github-patches
git_diff
gratipay__gratipay.com-2699
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [email protected] still linked several places Should be [email protected], right? ;-) </issue> <code> [start of gratipay/exceptions.py] 1 """ 2 This module contains exceptions shared across application code. 3 """ 4 5 from __future__ import print_function, unicode_literals 6 7 8 class ProblemChangingUsername(Exception): 9 def __str__(self): 10 return self.msg.format(self.args[0]) 11 12 class UsernameIsEmpty(ProblemChangingUsername): 13 msg = "You need to provide a username!" 14 15 class UsernameTooLong(ProblemChangingUsername): 16 msg = "The username '{}' is too long." 17 18 class UsernameContainsInvalidCharacters(ProblemChangingUsername): 19 msg = "The username '{}' contains invalid characters." 20 21 class UsernameIsRestricted(ProblemChangingUsername): 22 msg = "The username '{}' is restricted." 23 24 class UsernameAlreadyTaken(ProblemChangingUsername): 25 msg = "The username '{}' is already taken." 26 27 28 class ProblemChangingNumber(Exception): 29 def __str__(self): 30 return self.msg 31 32 class HasBigTips(ProblemChangingNumber): 33 msg = "You receive tips too large for an individual. Please contact [email protected]." 34 35 36 class TooGreedy(Exception): pass 37 class NoSelfTipping(Exception): pass 38 class NoTippee(Exception): pass 39 class BadAmount(Exception): pass 40 class UserDoesntAcceptTips(Exception): pass 41 42 class FailedToReserveUsername(Exception): pass 43 44 class NegativeBalance(Exception): 45 def __str__(self): 46 return "Negative balance not allowed in this context." 47 48 class NotWhitelisted(Exception): pass 49 class NoBalancedCustomerHref(Exception): pass 50 [end of gratipay/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gratipay/exceptions.py b/gratipay/exceptions.py --- a/gratipay/exceptions.py +++ b/gratipay/exceptions.py @@ -30,7 +30,7 @@ return self.msg class HasBigTips(ProblemChangingNumber): - msg = "You receive tips too large for an individual. Please contact [email protected]." + msg = "You receive tips too large for an individual. Please contact [email protected]." class TooGreedy(Exception): pass
{"golden_diff": "diff --git a/gratipay/exceptions.py b/gratipay/exceptions.py\n--- a/gratipay/exceptions.py\n+++ b/gratipay/exceptions.py\n@@ -30,7 +30,7 @@\n return self.msg\n \n class HasBigTips(ProblemChangingNumber):\n- msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n+ msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n \n \n class TooGreedy(Exception): pass\n", "issue": "[email protected] still linked several places\nShould be [email protected], right? ;-)\n\n", "before_files": [{"content": "\"\"\"\nThis module contains exceptions shared across application code.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\n\nclass ProblemChangingUsername(Exception):\n def __str__(self):\n return self.msg.format(self.args[0])\n\nclass UsernameIsEmpty(ProblemChangingUsername):\n msg = \"You need to provide a username!\"\n\nclass UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n\nclass UsernameContainsInvalidCharacters(ProblemChangingUsername):\n msg = \"The username '{}' contains invalid characters.\"\n\nclass UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n\nclass UsernameAlreadyTaken(ProblemChangingUsername):\n msg = \"The username '{}' is already taken.\"\n\n\nclass ProblemChangingNumber(Exception):\n def __str__(self):\n return self.msg\n\nclass HasBigTips(ProblemChangingNumber):\n msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n\n\nclass TooGreedy(Exception): pass\nclass NoSelfTipping(Exception): pass\nclass NoTippee(Exception): pass\nclass BadAmount(Exception): pass\nclass UserDoesntAcceptTips(Exception): pass\n\nclass FailedToReserveUsername(Exception): pass\n\nclass NegativeBalance(Exception):\n def __str__(self):\n return \"Negative balance not allowed in this context.\"\n\nclass NotWhitelisted(Exception): pass\nclass NoBalancedCustomerHref(Exception): pass\n", "path": "gratipay/exceptions.py"}]}
952
117
gh_patches_debug_40189
rasdani/github-patches
git_diff
mne-tools__mne-python-3423
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Memory consumption in plot_brainstorm_phantom_elekta Circle is failing because it's using so much memory. </issue> <code> [start of tutorials/plot_brainstorm_phantom_elekta.py] 1 # -*- coding: utf-8 -*- 2 """ 3 ========================================== 4 Brainstorm Elekta phantom tutorial dataset 5 ========================================== 6 7 Here we compute the evoked from raw for the Brainstorm Elekta phantom 8 tutorial dataset. For comparison, see [1]_ and: 9 10 http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta 11 12 References 13 ---------- 14 .. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM. 15 Brainstorm: A User-Friendly Application for MEG/EEG Analysis. 16 Computational Intelligence and Neuroscience, vol. 2011, Article ID 17 879716, 13 pages, 2011. doi:10.1155/2011/879716 18 """ 19 20 # Authors: Eric Larson <[email protected]> 21 # 22 # License: BSD (3-clause) 23 24 import os.path as op 25 import numpy as np 26 27 import mne 28 from mne import find_events, fit_dipole 29 from mne.datasets.brainstorm import bst_phantom_elekta 30 from mne.io import read_raw_fif 31 32 print(__doc__) 33 34 ############################################################################### 35 # The data were collected with an Elekta Neuromag VectorView system at 1000 Hz 36 # and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data 37 # are read to construct instances of :class:`mne.io.Raw`. 38 data_path = bst_phantom_elekta.data_path() 39 40 raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif') 41 raw = read_raw_fif(raw_fname, preload=True) 42 43 ############################################################################### 44 # Data channel array consisted of 204 MEG planor gradiometers, 45 # 102 axial magnetometers, and 3 stimulus channels. Let's get the events 46 # for the phantom, where each dipole (1-32) gets its own event: 47 48 events = find_events(raw, 'STI201') 49 raw.plot(events=events) 50 raw.info['bads'] = ['MEG2421'] 51 52 ############################################################################### 53 # The data have strong line frequency (60 Hz and harmonics) and cHPI coil 54 # noise (five peaks around 300 Hz): 55 56 raw.plot_psd() 57 58 ############################################################################### 59 # We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter. 60 61 raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s') 62 raw.plot_psd() 63 64 ############################################################################### 65 # The data are still a bit noisy, so let's use Maxwell filtering to clean it. 66 # Ideally we would have the fine calibration and cross-talk information 67 # for the site of interest, but we don't, so we just do: 68 69 raw.fix_mag_coil_types() 70 raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.)) 71 raw.plot(events=events) 72 73 ############################################################################### 74 # Now we epoch our data, average it, and look at the first dipole response. 75 # The first peak appears around 3 ms. 76 77 tmin, tmax = -0.2, 0.2 78 event_id = list(range(1, 33)) 79 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01)) 80 epochs['1'].average().plot() 81 82 ############################################################################### 83 # Let's do some dipole fits. 
The phantom is properly modeled by a single-shell 84 # sphere with origin (0., 0., 0.). We compute covariance, then do the fits. 85 86 t_peak = 0.061 # 3 MS at largest peak 87 sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None) 88 cov = mne.compute_covariance(epochs, tmax=0) 89 data = [] 90 for ii in range(1, 33): 91 evoked = epochs[str(ii)].average().crop(t_peak, t_peak) 92 data.append(evoked.data[:, 0]) 93 evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.) 94 dip = fit_dipole(evoked, cov, sphere, n_jobs=2)[0] 95 96 ############################################################################### 97 # Now we can compare to the actual locations, taking the difference in mm: 98 99 actual_pos = mne.dipole.get_phantom_dipoles(kind='122')[0] 100 diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1)) 101 print('Differences (mm):\n%s' % diffs[:, np.newaxis]) 102 [end of tutorials/plot_brainstorm_phantom_elekta.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tutorials/plot_brainstorm_phantom_elekta.py b/tutorials/plot_brainstorm_phantom_elekta.py --- a/tutorials/plot_brainstorm_phantom_elekta.py +++ b/tutorials/plot_brainstorm_phantom_elekta.py @@ -38,7 +38,7 @@ data_path = bst_phantom_elekta.data_path() raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif') -raw = read_raw_fif(raw_fname, preload=True) +raw = read_raw_fif(raw_fname) ############################################################################### # Data channel array consisted of 204 MEG planor gradiometers, @@ -51,39 +51,41 @@ ############################################################################### # The data have strong line frequency (60 Hz and harmonics) and cHPI coil -# noise (five peaks around 300 Hz): +# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds +# to save memory: -raw.plot_psd() +raw.plot_psd(tmax=60.) ############################################################################### -# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter. - -raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s') -raw.plot_psd() - -############################################################################### -# The data are still a bit noisy, so let's use Maxwell filtering to clean it. +# Let's use Maxwell filtering to clean the data a bit. # Ideally we would have the fine calibration and cross-talk information # for the site of interest, but we don't, so we just do: raw.fix_mag_coil_types() raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.)) + +############################################################################### +# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter. + +raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s') raw.plot(events=events) ############################################################################### # Now we epoch our data, average it, and look at the first dipole response. -# The first peak appears around 3 ms. +# The first peak appears around 3 ms. Because we low-passed at 40 Hz, +# we can also decimate our data to save memory. -tmin, tmax = -0.2, 0.2 +tmin, tmax = -0.1, 0.1 event_id = list(range(1, 33)) -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01)) +epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01), + decim=5) epochs['1'].average().plot() ############################################################################### # Let's do some dipole fits. The phantom is properly modeled by a single-shell # sphere with origin (0., 0., 0.). We compute covariance, then do the fits. -t_peak = 0.061 # 3 MS at largest peak +t_peak = 60e-3 # ~60 MS at largest peak sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None) cov = mne.compute_covariance(epochs, tmax=0) data = []
{"golden_diff": "diff --git a/tutorials/plot_brainstorm_phantom_elekta.py b/tutorials/plot_brainstorm_phantom_elekta.py\n--- a/tutorials/plot_brainstorm_phantom_elekta.py\n+++ b/tutorials/plot_brainstorm_phantom_elekta.py\n@@ -38,7 +38,7 @@\n data_path = bst_phantom_elekta.data_path()\n \n raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\n-raw = read_raw_fif(raw_fname, preload=True)\n+raw = read_raw_fif(raw_fname)\n \n ###############################################################################\n # Data channel array consisted of 204 MEG planor gradiometers,\n@@ -51,39 +51,41 @@\n \n ###############################################################################\n # The data have strong line frequency (60 Hz and harmonics) and cHPI coil\n-# noise (five peaks around 300 Hz):\n+# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds\n+# to save memory:\n \n-raw.plot_psd()\n+raw.plot_psd(tmax=60.)\n \n ###############################################################################\n-# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n-\n-raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\n-raw.plot_psd()\n-\n-###############################################################################\n-# The data are still a bit noisy, so let's use Maxwell filtering to clean it.\n+# Let's use Maxwell filtering to clean the data a bit.\n # Ideally we would have the fine calibration and cross-talk information\n # for the site of interest, but we don't, so we just do:\n \n raw.fix_mag_coil_types()\n raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))\n+\n+###############################################################################\n+# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n+\n+raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\n raw.plot(events=events)\n \n ###############################################################################\n # Now we epoch our data, average it, and look at the first dipole response.\n-# The first peak appears around 3 ms.\n+# The first peak appears around 3 ms. Because we low-passed at 40 Hz,\n+# we can also decimate our data to save memory.\n \n-tmin, tmax = -0.2, 0.2\n+tmin, tmax = -0.1, 0.1\n event_id = list(range(1, 33))\n-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))\n+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),\n+ decim=5)\n epochs['1'].average().plot()\n \n ###############################################################################\n # Let's do some dipole fits. The phantom is properly modeled by a single-shell\n # sphere with origin (0., 0., 0.). We compute covariance, then do the fits.\n \n-t_peak = 0.061 # 3 MS at largest peak\n+t_peak = 60e-3 # ~60 MS at largest peak\n sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)\n cov = mne.compute_covariance(epochs, tmax=0)\n data = []\n", "issue": "Memory consumption in plot_brainstorm_phantom_elekta\nCircle is failing because it's using so much memory.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n==========================================\nBrainstorm Elekta phantom tutorial dataset\n==========================================\n\nHere we compute the evoked from raw for the Brainstorm Elekta phantom\ntutorial dataset. For comparison, see [1]_ and:\n\n http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta\n\nReferences\n----------\n.. 
[1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.\n Brainstorm: A User-Friendly Application for MEG/EEG Analysis.\n Computational Intelligence and Neuroscience, vol. 2011, Article ID\n 879716, 13 pages, 2011. doi:10.1155/2011/879716\n\"\"\"\n\n# Authors: Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\n\nimport mne\nfrom mne import find_events, fit_dipole\nfrom mne.datasets.brainstorm import bst_phantom_elekta\nfrom mne.io import read_raw_fif\n\nprint(__doc__)\n\n###############################################################################\n# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz\n# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data\n# are read to construct instances of :class:`mne.io.Raw`.\ndata_path = bst_phantom_elekta.data_path()\n\nraw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\nraw = read_raw_fif(raw_fname, preload=True)\n\n###############################################################################\n# Data channel array consisted of 204 MEG planor gradiometers,\n# 102 axial magnetometers, and 3 stimulus channels. Let's get the events\n# for the phantom, where each dipole (1-32) gets its own event:\n\nevents = find_events(raw, 'STI201')\nraw.plot(events=events)\nraw.info['bads'] = ['MEG2421']\n\n###############################################################################\n# The data have strong line frequency (60 Hz and harmonics) and cHPI coil\n# noise (five peaks around 300 Hz):\n\nraw.plot_psd()\n\n###############################################################################\n# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n\nraw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\nraw.plot_psd()\n\n###############################################################################\n# The data are still a bit noisy, so let's use Maxwell filtering to clean it.\n# Ideally we would have the fine calibration and cross-talk information\n# for the site of interest, but we don't, so we just do:\n\nraw.fix_mag_coil_types()\nraw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))\nraw.plot(events=events)\n\n###############################################################################\n# Now we epoch our data, average it, and look at the first dipole response.\n# The first peak appears around 3 ms.\n\ntmin, tmax = -0.2, 0.2\nevent_id = list(range(1, 33))\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))\nepochs['1'].average().plot()\n\n###############################################################################\n# Let's do some dipole fits. The phantom is properly modeled by a single-shell\n# sphere with origin (0., 0., 0.). 
We compute covariance, then do the fits.\n\nt_peak = 0.061 # 3 MS at largest peak\nsphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)\ncov = mne.compute_covariance(epochs, tmax=0)\ndata = []\nfor ii in range(1, 33):\n evoked = epochs[str(ii)].average().crop(t_peak, t_peak)\n data.append(evoked.data[:, 0])\nevoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)\ndip = fit_dipole(evoked, cov, sphere, n_jobs=2)[0]\n\n###############################################################################\n# Now we can compare to the actual locations, taking the difference in mm:\n\nactual_pos = mne.dipole.get_phantom_dipoles(kind='122')[0]\ndiffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))\nprint('Differences (mm):\\n%s' % diffs[:, np.newaxis])\n", "path": "tutorials/plot_brainstorm_phantom_elekta.py"}]}
1806
773
gh_patches_debug_57
rasdani/github-patches
git_diff
Anselmoo__spectrafit-695
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Docs]: Update labeled criteria for CHANGELOG ### Is there an existing issue for this? - [X] I have searched the existing issues ### Current Missing Information in the Docs Update the labeler criteria for avoiding double labels in `CHANGELOG.md` ### Anything else? _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct </issue> <code> [start of spectrafit/__init__.py] 1 """SpectraFit, fast command line tool for fitting data.""" 2 __version__ = "1.0.0a7" 3 [end of spectrafit/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py --- a/spectrafit/__init__.py +++ b/spectrafit/__init__.py @@ -1,2 +1,2 @@ """SpectraFit, fast command line tool for fitting data.""" -__version__ = "1.0.0a7" +__version__ = "1.0.0a8"
{"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"1.0.0a7\"\n+__version__ = \"1.0.0a8\"\n", "issue": "[Docs]: Update labeled criteria for CHANGELOG\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\nUpdate the labeler criteria for avoiding double labels in `CHANGELOG.md`\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a7\"\n", "path": "spectrafit/__init__.py"}]}
648
97
gh_patches_debug_27167
rasdani/github-patches
git_diff
conan-io__conan-3254
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enable python 3.7 in CI At least for Linux </issue> <code> [start of conans/pylint_plugin.py] 1 """Pylint plugin for ConanFile""" 2 3 import astroid 4 from astroid import MANAGER, scoped_nodes 5 6 7 def register(linter): 8 """Declare package as plugin 9 10 This function needs to be declared so astroid treats 11 current file as a plugin. 12 """ 13 pass 14 15 16 def transform_conanfile(node): 17 """Transform definition of ConanFile class so dynamic fields are visible to pylint""" 18 19 str_class = scoped_nodes.builtin_lookup("str") 20 info_class = MANAGER.ast_from_module_name("conans.model.info").lookup( 21 "ConanInfo") 22 build_requires_class = MANAGER.ast_from_module_name( 23 "conans.client.graph.graph_manager").lookup("_RecipeBuildRequires") 24 file_copier_class = MANAGER.ast_from_module_name( 25 "conans.client.file_copier").lookup("FileCopier") 26 file_importer_class = MANAGER.ast_from_module_name( 27 "conans.client.importer").lookup("_FileImporter") 28 29 dynamic_fields = { 30 "source_folder": str_class, 31 "build_folder": str_class, 32 "package_folder": str_class, 33 "build_requires": build_requires_class, 34 "info_build": info_class, 35 "info": info_class, 36 "copy": file_copier_class, 37 "copy_deps": file_importer_class, 38 } 39 40 for f, t in dynamic_fields.items(): 41 node.locals[f] = [t] 42 43 44 MANAGER.register_transform( 45 scoped_nodes.Class, transform_conanfile, 46 lambda node: node.qname() == "conans.model.conan_file.ConanFile") 47 [end of conans/pylint_plugin.py] [start of .ci/jenkins/conf.py] 1 import argparse 2 import os 3 import platform 4 from contextlib import contextmanager 5 6 winpylocation = {"py27": "C:\\Python27\\python.exe", 7 "py34": "C:\\Python34\\python.exe", 8 "py36": "C:\\Python36\\python.exe"} 9 10 macpylocation = {"py27": "/usr/bin/python", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python", 11 "py34": "/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python", 12 "py36": "/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python"} 13 14 linuxpylocation = {"py27": "/usr/bin/python2.7", 15 "py34": "/usr/bin/python3.4", 16 "py36": "/usr/bin/python3.6"} 17 18 19 def get_environ(tmp_path): 20 if platform.system() == "Windows": 21 return {"CONAN_BASH_PATH": "c:/tools/msys64/usr/bin/bash", 22 "CONAN_USER_HOME_SHORT": os.path.join(tmp_path, ".conan")} 23 return {} 24 25 26 class Extender(argparse.Action): 27 """Allows to use the same flag several times in a command and creates a list with the values. 28 For example: 29 conan install MyPackage/1.2@user/channel -o qt:value -o mode:2 -s cucumber:true 30 It creates: 31 options = ['qt:value', 'mode:2'] 32 settings = ['cucumber:true'] 33 """ 34 def __call__(self, parser, namespace, values, option_strings=None): # @UnusedVariable 35 # Need None here incase `argparse.SUPPRESS` was supplied for `dest` 36 dest = getattr(namespace, self.dest, None) 37 if not hasattr(dest, 'extend') or dest == self.default: 38 dest = [] 39 setattr(namespace, self.dest, dest) 40 # if default isn't set to None, this method might be called 41 # with the default as `values` for other arguments which 42 # share this destination. 
43 parser.set_defaults(**{self.dest: None}) 44 45 try: 46 dest.extend(values) 47 except ValueError: 48 dest.append(values) 49 50 51 @contextmanager 52 def environment_append(env_vars): 53 old_env = dict(os.environ) 54 for name, value in env_vars.items(): 55 if isinstance(value, list): 56 env_vars[name] = os.pathsep.join(value) 57 if name in old_env: 58 env_vars[name] += os.pathsep + old_env[name] 59 os.environ.update(env_vars) 60 try: 61 yield 62 finally: 63 os.environ.clear() 64 os.environ.update(old_env) 65 66 67 @contextmanager 68 def chdir(newdir): 69 old_path = os.getcwd() 70 os.chdir(newdir) 71 try: 72 yield 73 finally: 74 os.chdir(old_path) 75 [end of .ci/jenkins/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/.ci/jenkins/conf.py b/.ci/jenkins/conf.py --- a/.ci/jenkins/conf.py +++ b/.ci/jenkins/conf.py @@ -13,7 +13,8 @@ linuxpylocation = {"py27": "/usr/bin/python2.7", "py34": "/usr/bin/python3.4", - "py36": "/usr/bin/python3.6"} + "py36": "/usr/bin/python3.6", + "py37": "/usr/bin/python3.7"} def get_environ(tmp_path): diff --git a/conans/pylint_plugin.py b/conans/pylint_plugin.py --- a/conans/pylint_plugin.py +++ b/conans/pylint_plugin.py @@ -1,7 +1,6 @@ """Pylint plugin for ConanFile""" - import astroid -from astroid import MANAGER, scoped_nodes +from astroid import MANAGER def register(linter): @@ -16,7 +15,7 @@ def transform_conanfile(node): """Transform definition of ConanFile class so dynamic fields are visible to pylint""" - str_class = scoped_nodes.builtin_lookup("str") + str_class = astroid.builtin_lookup("str") info_class = MANAGER.ast_from_module_name("conans.model.info").lookup( "ConanInfo") build_requires_class = MANAGER.ast_from_module_name( @@ -42,5 +41,5 @@ MANAGER.register_transform( - scoped_nodes.Class, transform_conanfile, + astroid.ClassDef, transform_conanfile, lambda node: node.qname() == "conans.model.conan_file.ConanFile")
{"golden_diff": "diff --git a/.ci/jenkins/conf.py b/.ci/jenkins/conf.py\n--- a/.ci/jenkins/conf.py\n+++ b/.ci/jenkins/conf.py\n@@ -13,7 +13,8 @@\n \n linuxpylocation = {\"py27\": \"/usr/bin/python2.7\",\n \"py34\": \"/usr/bin/python3.4\",\n- \"py36\": \"/usr/bin/python3.6\"}\n+ \"py36\": \"/usr/bin/python3.6\",\n+ \"py37\": \"/usr/bin/python3.7\"}\n \n \n def get_environ(tmp_path):\ndiff --git a/conans/pylint_plugin.py b/conans/pylint_plugin.py\n--- a/conans/pylint_plugin.py\n+++ b/conans/pylint_plugin.py\n@@ -1,7 +1,6 @@\n \"\"\"Pylint plugin for ConanFile\"\"\"\n-\n import astroid\n-from astroid import MANAGER, scoped_nodes\n+from astroid import MANAGER\n \n \n def register(linter):\n@@ -16,7 +15,7 @@\n def transform_conanfile(node):\n \"\"\"Transform definition of ConanFile class so dynamic fields are visible to pylint\"\"\"\n \n- str_class = scoped_nodes.builtin_lookup(\"str\")\n+ str_class = astroid.builtin_lookup(\"str\")\n info_class = MANAGER.ast_from_module_name(\"conans.model.info\").lookup(\n \"ConanInfo\")\n build_requires_class = MANAGER.ast_from_module_name(\n@@ -42,5 +41,5 @@\n \n \n MANAGER.register_transform(\n- scoped_nodes.Class, transform_conanfile,\n+ astroid.ClassDef, transform_conanfile,\n lambda node: node.qname() == \"conans.model.conan_file.ConanFile\")\n", "issue": "Enable python 3.7 in CI \nAt least for Linux\r\n\r\n\n", "before_files": [{"content": "\"\"\"Pylint plugin for ConanFile\"\"\"\n\nimport astroid\nfrom astroid import MANAGER, scoped_nodes\n\n\ndef register(linter):\n \"\"\"Declare package as plugin\n \n This function needs to be declared so astroid treats\n current file as a plugin.\n \"\"\"\n pass\n\n\ndef transform_conanfile(node):\n \"\"\"Transform definition of ConanFile class so dynamic fields are visible to pylint\"\"\"\n\n str_class = scoped_nodes.builtin_lookup(\"str\")\n info_class = MANAGER.ast_from_module_name(\"conans.model.info\").lookup(\n \"ConanInfo\")\n build_requires_class = MANAGER.ast_from_module_name(\n \"conans.client.graph.graph_manager\").lookup(\"_RecipeBuildRequires\")\n file_copier_class = MANAGER.ast_from_module_name(\n \"conans.client.file_copier\").lookup(\"FileCopier\")\n file_importer_class = MANAGER.ast_from_module_name(\n \"conans.client.importer\").lookup(\"_FileImporter\")\n\n dynamic_fields = {\n \"source_folder\": str_class,\n \"build_folder\": str_class,\n \"package_folder\": str_class,\n \"build_requires\": build_requires_class,\n \"info_build\": info_class,\n \"info\": info_class,\n \"copy\": file_copier_class,\n \"copy_deps\": file_importer_class,\n }\n\n for f, t in dynamic_fields.items():\n node.locals[f] = [t]\n\n\nMANAGER.register_transform(\n scoped_nodes.Class, transform_conanfile,\n lambda node: node.qname() == \"conans.model.conan_file.ConanFile\")\n", "path": "conans/pylint_plugin.py"}, {"content": "import argparse\nimport os\nimport platform\nfrom contextlib import contextmanager\n\nwinpylocation = {\"py27\": \"C:\\\\Python27\\\\python.exe\",\n \"py34\": \"C:\\\\Python34\\\\python.exe\",\n \"py36\": \"C:\\\\Python36\\\\python.exe\"}\n\nmacpylocation = {\"py27\": \"/usr/bin/python\", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python\",\n \"py34\": \"/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python\",\n \"py36\": \"/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python\"}\n\nlinuxpylocation = {\"py27\": \"/usr/bin/python2.7\",\n \"py34\": \"/usr/bin/python3.4\",\n \"py36\": \"/usr/bin/python3.6\"}\n\n\ndef get_environ(tmp_path):\n if platform.system() == \"Windows\":\n return {\"CONAN_BASH_PATH\": 
\"c:/tools/msys64/usr/bin/bash\",\n \"CONAN_USER_HOME_SHORT\": os.path.join(tmp_path, \".conan\")}\n return {}\n\n\nclass Extender(argparse.Action):\n \"\"\"Allows to use the same flag several times in a command and creates a list with the values.\n For example:\n conan install MyPackage/1.2@user/channel -o qt:value -o mode:2 -s cucumber:true\n It creates:\n options = ['qt:value', 'mode:2']\n settings = ['cucumber:true']\n \"\"\"\n def __call__(self, parser, namespace, values, option_strings=None): # @UnusedVariable\n # Need None here incase `argparse.SUPPRESS` was supplied for `dest`\n dest = getattr(namespace, self.dest, None)\n if not hasattr(dest, 'extend') or dest == self.default:\n dest = []\n setattr(namespace, self.dest, dest)\n # if default isn't set to None, this method might be called\n # with the default as `values` for other arguments which\n # share this destination.\n parser.set_defaults(**{self.dest: None})\n\n try:\n dest.extend(values)\n except ValueError:\n dest.append(values)\n\n\n@contextmanager\ndef environment_append(env_vars):\n old_env = dict(os.environ)\n for name, value in env_vars.items():\n if isinstance(value, list):\n env_vars[name] = os.pathsep.join(value)\n if name in old_env:\n env_vars[name] += os.pathsep + old_env[name]\n os.environ.update(env_vars)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n\n\n@contextmanager\ndef chdir(newdir):\n old_path = os.getcwd()\n os.chdir(newdir)\n try:\n yield\n finally:\n os.chdir(old_path)\n", "path": ".ci/jenkins/conf.py"}]}
1773
377
gh_patches_debug_24500
rasdani/github-patches
git_diff
mirumee__ariadne-23
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create `resolve_to` utility Python variables are usually named using the `snake_case`, whereas majority of JS written today uses the `camelCase`. Ariadne will need to provide a way to resolve one to another. The idea is to create `resolver_for` function taking single `name` argument and returning resolver for specified attr/key name, enabling **explicit** mapping as such: ``` resolvers_map = { "User": { "lastVisitedOn": resolve_to("last_visited_on") } } ``` Other advantage of having such utility would be arbitrary field names: ``` resolvers_map = { "User": { "lastVisitedOn": resolve_to("last_visit") } } ``` </issue> <code> [start of ariadne/resolvers.py] 1 from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema 2 from graphql.execution.base import ResolveInfo 3 4 5 def default_resolver(parent, info: ResolveInfo): 6 if isinstance(parent, dict): 7 return parent.get(info.field_name) 8 return getattr(parent, info.field_name, None) 9 10 11 def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict): 12 for type_name, type_object in schema.get_type_map().items(): 13 if isinstance(type_object, GraphQLObjectType): 14 add_resolve_functions_to_object(type_name, type_object, resolvers) 15 if isinstance(type_object, GraphQLScalarType): 16 add_resolve_function_to_scalar(type_name, type_object, resolvers) 17 18 19 def add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict): 20 type_resolver = resolvers.get(name, {}) 21 for field_name, field_object in obj.fields.items(): 22 field_resolver = type_resolver.get(field_name, default_resolver) 23 field_object.resolver = field_resolver 24 25 26 def add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict): 27 serializer = resolvers.get(name, obj.serialize) 28 obj.serialize = serializer 29 [end of ariadne/resolvers.py] [start of ariadne/__init__.py] 1 from .build_schema import build_schema_from_type_definitions 2 from .executable_schema import make_executable_schema 3 from .resolvers import add_resolve_functions_to_schema, default_resolver 4 5 __all__ = [ 6 "add_resolve_functions_to_schema", 7 "build_schema_from_type_definitions", 8 "default_resolver", 9 "make_executable_schema", 10 ] 11 [end of ariadne/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ariadne/__init__.py b/ariadne/__init__.py --- a/ariadne/__init__.py +++ b/ariadne/__init__.py @@ -1,10 +1,11 @@ from .build_schema import build_schema_from_type_definitions from .executable_schema import make_executable_schema -from .resolvers import add_resolve_functions_to_schema, default_resolver +from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to __all__ = [ "add_resolve_functions_to_schema", "build_schema_from_type_definitions", "default_resolver", "make_executable_schema", + "resolve_to", ] diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py --- a/ariadne/resolvers.py +++ b/ariadne/resolvers.py @@ -2,10 +2,21 @@ from graphql.execution.base import ResolveInfo -def default_resolver(parent, info: ResolveInfo): +def resolve_parent_field(parent, name: str): if isinstance(parent, dict): - return parent.get(info.field_name) - return getattr(parent, info.field_name, None) + return parent.get(name) + return getattr(parent, name, None) + + +def default_resolver(parent, info: ResolveInfo): + return resolve_parent_field(parent, info.field_name) + + +def resolve_to(name: str): + def resolver(parent, *_): + return resolve_parent_field(parent, name) + + return resolver def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):
{"golden_diff": "diff --git a/ariadne/__init__.py b/ariadne/__init__.py\n--- a/ariadne/__init__.py\n+++ b/ariadne/__init__.py\n@@ -1,10 +1,11 @@\n from .build_schema import build_schema_from_type_definitions\n from .executable_schema import make_executable_schema\n-from .resolvers import add_resolve_functions_to_schema, default_resolver\n+from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to\n \n __all__ = [\n \"add_resolve_functions_to_schema\",\n \"build_schema_from_type_definitions\",\n \"default_resolver\",\n \"make_executable_schema\",\n+ \"resolve_to\",\n ]\ndiff --git a/ariadne/resolvers.py b/ariadne/resolvers.py\n--- a/ariadne/resolvers.py\n+++ b/ariadne/resolvers.py\n@@ -2,10 +2,21 @@\n from graphql.execution.base import ResolveInfo\n \n \n-def default_resolver(parent, info: ResolveInfo):\n+def resolve_parent_field(parent, name: str):\n if isinstance(parent, dict):\n- return parent.get(info.field_name)\n- return getattr(parent, info.field_name, None)\n+ return parent.get(name)\n+ return getattr(parent, name, None)\n+\n+\n+def default_resolver(parent, info: ResolveInfo):\n+ return resolve_parent_field(parent, info.field_name)\n+\n+\n+def resolve_to(name: str):\n+ def resolver(parent, *_):\n+ return resolve_parent_field(parent, name)\n+\n+ return resolver\n \n \n def add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n", "issue": "Create `resolve_to` utility\nPython variables are usually named using the `snake_case`, whereas majority of JS written today uses the `camelCase`. Ariadne will need to provide a way to resolve one to another.\r\n\r\nThe idea is to create `resolver_for` function taking single `name` argument and returning resolver for specified attr/key name, enabling **explicit** mapping as such:\r\n\r\n```\r\nresolvers_map = {\r\n \"User\": {\r\n \"lastVisitedOn\": resolve_to(\"last_visited_on\")\r\n }\r\n}\r\n```\r\n\r\nOther advantage of having such utility would be arbitrary field names:\r\n\r\n```\r\nresolvers_map = {\r\n \"User\": {\r\n \"lastVisitedOn\": resolve_to(\"last_visit\")\r\n }\r\n}\r\n```\n", "before_files": [{"content": "from graphql import GraphQLObjectType, GraphQLScalarType, GraphQLSchema\nfrom graphql.execution.base import ResolveInfo\n\n\ndef default_resolver(parent, info: ResolveInfo):\n if isinstance(parent, dict):\n return parent.get(info.field_name)\n return getattr(parent, info.field_name, None)\n\n\ndef add_resolve_functions_to_schema(schema: GraphQLSchema, resolvers: dict):\n for type_name, type_object in schema.get_type_map().items():\n if isinstance(type_object, GraphQLObjectType):\n add_resolve_functions_to_object(type_name, type_object, resolvers)\n if isinstance(type_object, GraphQLScalarType):\n add_resolve_function_to_scalar(type_name, type_object, resolvers)\n\n\ndef add_resolve_functions_to_object(name: str, obj: GraphQLObjectType, resolvers: dict):\n type_resolver = resolvers.get(name, {})\n for field_name, field_object in obj.fields.items():\n field_resolver = type_resolver.get(field_name, default_resolver)\n field_object.resolver = field_resolver\n\n\ndef add_resolve_function_to_scalar(name: str, obj: GraphQLObjectType, resolvers: dict):\n serializer = resolvers.get(name, obj.serialize)\n obj.serialize = serializer\n", "path": "ariadne/resolvers.py"}, {"content": "from .build_schema import build_schema_from_type_definitions\nfrom .executable_schema import make_executable_schema\nfrom .resolvers import add_resolve_functions_to_schema, default_resolver\n\n__all__ = [\n 
\"add_resolve_functions_to_schema\",\n \"build_schema_from_type_definitions\",\n \"default_resolver\",\n \"make_executable_schema\",\n]\n", "path": "ariadne/__init__.py"}]}
1086
353
gh_patches_debug_5210
rasdani/github-patches
git_diff
fedora-infra__bodhi-3464
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Greenwave consumer should not process the compose results Currently the greenwave consumer consumes all the messages from `topic=org.fedoraproject.prod.greenwave.decision.update` but some messages on this topic are related to compose results and not koji build. The consumer should filter the messages based on the `subject_type`, and only process `koji_build` type. See a koji_build example https://apps.fedoraproject.org/datagrepper/id?id=2019-efb310ed-6acf-4742-809f-90a40c2a5855&is_raw=true&size=extra-large And a compose example https://apps.fedoraproject.org/datagrepper/id?id=2019-33002e05-241c-4180-8953-5a29cea4443d&is_raw=true&size=extra-large This is currently raising the following exception in production : ``` 2019-07-26 15:16:55,501 ERROR [bodhi.server][PoolThread-twisted.internet.reactor-0] Bodhi failed to send POST request to Greenwave at the following URL "https://greenwave-web-greenwave.app.os.fedoraproject.org/api/v1.0/decision". The status code was "502". The error was "{'message': 'Failed to retrieve SCM URL from Koji build "Fedora-Workstation-Rawhide-20190726.n.0" at "https://koji.fedoraproject.org/kojihub" (expected SCM URL in "source" attribute)'}". ``` </issue> <code> [start of bodhi/server/consumers/greenwave.py] 1 # Copyright © 2019 Red Hat, Inc. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License along with 16 # this program; if not, write to the Free Software Foundation, Inc., 51 17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 """ 19 The "greenwave handler". 20 21 This module is responsible for listening for messages from greenwave. 22 It then updates the policies of the build that greenwave checked. 23 """ 24 25 import logging 26 27 import fedora_messaging 28 29 from bodhi.server.models import Build 30 from bodhi.server.util import transactional_session_maker 31 32 log = logging.getLogger(__name__) 33 34 35 class GreenwaveHandler: 36 """ 37 The Bodhi Greenwave Handler. 38 39 A fedora-messaging listener waiting for messages from greenwave about enforced policies. 
40 """ 41 42 def __init__(self): 43 """Initialize the GreenwaveHandler.""" 44 self.db_factory = transactional_session_maker() 45 46 def __call__(self, message: fedora_messaging.api.Message): 47 """Handle messages arriving with the configured topic.""" 48 msg = message.body 49 if not msg: 50 log.debug("Ignoring message without body.") 51 return 52 53 subject_identifier = msg.get("subject_identifier") 54 55 if subject_identifier is None: 56 log.debug("Couldn't find subject_identifier in Greenwave message") 57 return 58 59 with self.db_factory(): 60 61 build = Build.get(subject_identifier) 62 if build is None: 63 log.debug(f"Couldn't find build {subject_identifier} in DB") 64 return 65 66 log.info(f"Updating the test_gating_status for: {build.update.alias}") 67 build.update.update_test_gating_status() 68 [end of bodhi/server/consumers/greenwave.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bodhi/server/consumers/greenwave.py b/bodhi/server/consumers/greenwave.py --- a/bodhi/server/consumers/greenwave.py +++ b/bodhi/server/consumers/greenwave.py @@ -56,6 +56,11 @@ log.debug("Couldn't find subject_identifier in Greenwave message") return + subject_type = msg.get("subject_type") + if subject_type == "compose": + log.debug("Not requesting a decision for a compose") + return + with self.db_factory(): build = Build.get(subject_identifier)
{"golden_diff": "diff --git a/bodhi/server/consumers/greenwave.py b/bodhi/server/consumers/greenwave.py\n--- a/bodhi/server/consumers/greenwave.py\n+++ b/bodhi/server/consumers/greenwave.py\n@@ -56,6 +56,11 @@\n log.debug(\"Couldn't find subject_identifier in Greenwave message\")\n return\n \n+ subject_type = msg.get(\"subject_type\")\n+ if subject_type == \"compose\":\n+ log.debug(\"Not requesting a decision for a compose\")\n+ return\n+\n with self.db_factory():\n \n build = Build.get(subject_identifier)\n", "issue": "Greenwave consumer should not process the compose results\nCurrently the greenwave consumer consumes all the messages from `topic=org.fedoraproject.prod.greenwave.decision.update` but some messages on this topic are related to compose results and not koji build.\r\n\r\nThe consumer should filter the messages based on the `subject_type`, and only process `koji_build` type.\r\n\r\nSee a koji_build example https://apps.fedoraproject.org/datagrepper/id?id=2019-efb310ed-6acf-4742-809f-90a40c2a5855&is_raw=true&size=extra-large\r\n\r\nAnd a compose example https://apps.fedoraproject.org/datagrepper/id?id=2019-33002e05-241c-4180-8953-5a29cea4443d&is_raw=true&size=extra-large\r\n\r\nThis is currently raising the following exception in production : \r\n``` 2019-07-26 15:16:55,501 ERROR [bodhi.server][PoolThread-twisted.internet.reactor-0] Bodhi failed to send POST request to Greenwave at the following URL \"https://greenwave-web-greenwave.app.os.fedoraproject.org/api/v1.0/decision\". The status code was \"502\". The error was \"{'message': 'Failed to retrieve SCM URL from Koji build \"Fedora-Workstation-Rawhide-20190726.n.0\" at \"https://koji.fedoraproject.org/kojihub\" (expected SCM URL in \"source\" attribute)'}\". ```\n", "before_files": [{"content": "# Copyright \u00a9 2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"greenwave handler\".\n\nThis module is responsible for listening for messages from greenwave.\nIt then updates the policies of the build that greenwave checked.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\n\nfrom bodhi.server.models import Build\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger(__name__)\n\n\nclass GreenwaveHandler:\n \"\"\"\n The Bodhi Greenwave Handler.\n\n A fedora-messaging listener waiting for messages from greenwave about enforced policies.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the GreenwaveHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"Handle messages arriving with the configured topic.\"\"\"\n msg = message.body\n if not msg:\n log.debug(\"Ignoring message without body.\")\n return\n\n subject_identifier = msg.get(\"subject_identifier\")\n\n if subject_identifier is None:\n log.debug(\"Couldn't find subject_identifier in Greenwave message\")\n return\n\n with self.db_factory():\n\n build = Build.get(subject_identifier)\n if build is None:\n log.debug(f\"Couldn't find build {subject_identifier} in DB\")\n return\n\n log.info(f\"Updating the test_gating_status for: {build.update.alias}\")\n build.update.update_test_gating_status()\n", "path": "bodhi/server/consumers/greenwave.py"}]}
1,534
140
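The guard this record's golden diff introduces reduces to a small predicate over the incoming message body: ignore empty payloads, payloads without a `subject_identifier`, and anything whose `subject_type` is `compose`. Below is a minimal standalone sketch of that logic, not the real bodhi `GreenwaveHandler`; the koji_build payload is hypothetical, while the compose identifier is the one quoted in the issue's error message.

```python
# Standalone sketch of the decision guard added by the golden diff.
def should_request_decision(msg: dict) -> bool:
    """Return True only for messages the handler should act on."""
    if not msg:
        return False  # empty body: nothing to do
    if msg.get("subject_identifier") is None:
        return False  # no identifier to look up in the DB
    if msg.get("subject_type") == "compose":
        return False  # compose results are not koji builds
    return True


koji_msg = {
    # hypothetical koji_build payload
    "subject_type": "koji_build",
    "subject_identifier": "python-tools-2019.2-1.fc31",
}
compose_msg = {
    # identifier taken from the error message quoted in the issue
    "subject_type": "compose",
    "subject_identifier": "Fedora-Workstation-Rawhide-20190726.n.0",
}

assert should_request_decision(koji_msg)
assert not should_request_decision(compose_msg)
```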
gh_patches_debug_24389
rasdani/github-patches
git_diff
pulp__pulpcore-4189
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'." **Version** 3.16, but probably all versions **Describe the bug** - Reclaim space for repository fails with the following error. ~~~ Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})" ~~~ **To Reproduce** See BZ **Additional context** https://bugzilla.redhat.com/show_bug.cgi?id=2169322 </issue> <code> [start of pulpcore/app/tasks/reclaim_space.py] 1 from pulpcore.app.models import ( 2 Artifact, 3 Content, 4 ContentArtifact, 5 ProgressReport, 6 PublishedMetadata, 7 Repository, 8 RepositoryVersion, 9 ) 10 from pulpcore.app.util import get_domain 11 12 13 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False): 14 """ 15 This task frees-up disk space by removing Artifact files from the filesystem for Content 16 exclusive to the list of provided repos. 17 18 Note: content marked as `proctected` will be excluded from the reclaim disk space. 19 20 Kwargs: 21 repo_pks (list): A list of repo pks the disk reclaim space is performed on. 22 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim 23 disk space. 24 force (bool): If True, uploaded content will be taken into account. 25 26 """ 27 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks) 28 for repo in reclaimed_repos: 29 repo.invalidate_cache(everything=True) 30 31 domain = get_domain() 32 rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks) 33 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos) 34 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks) 35 c_reclaim_qs = c_reclaim_qs.exclude( 36 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type() 37 ) 38 39 if keeplist_rv_pks: 40 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks) 41 rv_content = Content.objects.none() 42 for rv in rv_qs.iterator(): 43 rv_content |= rv.content 44 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content) 45 46 content_distinct = c_reclaim_qs.distinct("pulp_type") 47 unprotected = [] 48 for content in content_distinct: 49 if not content.cast().PROTECTED_FROM_RECLAIM: 50 unprotected.append(content.pulp_type) 51 52 ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter( 53 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False 54 ) 55 if not force: 56 ca_qs = ca_qs.filter(remoteartifact__isnull=False) 57 artifact_pks = set() 58 ca_to_update = [] 59 for ca in ca_qs.iterator(): 60 if ca.content.pulp_type in unprotected: 61 artifact_pks.add(ca.artifact.pk) 62 ca.artifact = None 63 ca_to_update.append(ca) 64 65 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000) 66 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks) 67 progress_bar = ProgressReport( 68 message="Reclaim disk space", 69 total=artifacts_to_delete.count(), 70 code="reclaim-space.artifact", 71 done=0, 72 state="running", 73 ) 74 progress_bar.save() 75 76 counter = 0 77 interval = 100 78 for artifact in artifacts_to_delete.iterator(): 79 # we need to manually call delete() because it cleans up the file on the filesystem 80 artifact.delete() 
81 progress_bar.done += 1 82 counter += 1 83 84 if counter >= interval: 85 progress_bar.save() 86 counter = 0 87 88 progress_bar.state = "completed" 89 progress_bar.save() 90 [end of pulpcore/app/tasks/reclaim_space.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py --- a/pulpcore/app/tasks/reclaim_space.py +++ b/pulpcore/app/tasks/reclaim_space.py @@ -1,3 +1,7 @@ +from logging import getLogger + +from django.db.models.deletion import ProtectedError + from pulpcore.app.models import ( Artifact, Content, @@ -9,6 +13,8 @@ ) from pulpcore.app.util import get_domain +log = getLogger(__name__) + def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False): """ @@ -76,10 +82,16 @@ counter = 0 interval = 100 for artifact in artifacts_to_delete.iterator(): - # we need to manually call delete() because it cleans up the file on the filesystem - artifact.delete() - progress_bar.done += 1 - counter += 1 + try: + # we need to manually call delete() because it cleans up the file on the filesystem + artifact.delete() + except ProtectedError as e: + # Rarely artifact could be shared between to different content units. + # Just log and skip the artifact deletion in this case + log.info(e) + else: + progress_bar.done += 1 + counter += 1 if counter >= interval: progress_bar.save()
{"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -1,3 +1,7 @@\n+from logging import getLogger\n+\n+from django.db.models.deletion import ProtectedError\n+\n from pulpcore.app.models import (\n Artifact,\n Content,\n@@ -9,6 +13,8 @@\n )\n from pulpcore.app.util import get_domain\n \n+log = getLogger(__name__)\n+\n \n def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n@@ -76,10 +82,16 @@\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n- # we need to manually call delete() because it cleans up the file on the filesystem\n- artifact.delete()\n- progress_bar.done += 1\n- counter += 1\n+ try:\n+ # we need to manually call delete() because it cleans up the file on the filesystem\n+ artifact.delete()\n+ except ProtectedError as e:\n+ # Rarely artifact could be shared between to different content units.\n+ # Just log and skip the artifact deletion in this case\n+ log.info(e)\n+ else:\n+ progress_bar.done += 1\n+ counter += 1\n \n if counter >= interval:\n progress_bar.save()\n", "issue": "Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\"\n**Version**\r\n3.16, but probably all versions\r\n\r\n**Describe the bug**\r\n\r\n- Reclaim space for repository fails with the following error.\r\n\r\n ~~~\r\n Task paused with error: \"(\"Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})\"\r\n ~~~\r\n\r\n**To Reproduce**\r\nSee BZ\r\n\r\n**Additional context**\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2169322\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in 
content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]}
1,628
323
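The pattern in this record's fix is to tolerate `ProtectedError` per artifact rather than letting one shared artifact abort the whole reclaim-space task. The sketch below shows that loop in isolation, assuming Django is installed; `_FakeArtifact` is a hypothetical stand-in for pulpcore's `Artifact` model, used only to exercise the error path.

```python
# Sketch of the deletion loop after the golden diff: protected artifacts are
# logged and skipped instead of raising out of the task.
import logging

from django.db.models.deletion import ProtectedError

log = logging.getLogger(__name__)


def delete_unreferenced(artifacts):
    deleted = 0
    for artifact in artifacts:
        try:
            # delete() is called per object because it also removes the file
            artifact.delete()
        except ProtectedError as exc:
            # rarely an artifact is shared by two content units; skip it
            log.info(exc)
        else:
            deleted += 1
    return deleted


class _FakeArtifact:
    """Hypothetical stand-in for pulpcore's Artifact, for illustration only."""

    def __init__(self, protected=False):
        self.protected = protected

    def delete(self):
        if self.protected:
            raise ProtectedError(
                "referenced through protected foreign key 'ContentArtifact.artifact'",
                set(),
            )


assert delete_unreferenced([_FakeArtifact(), _FakeArtifact(protected=True)]) == 1
```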
gh_patches_debug_49360
rasdani/github-patches
git_diff
zulip__zulip-5407
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pull GCI docs into main Zulip repo. Some of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link from appropriate places. cc @synicalsyntax @lonerz @tommyip </issue> <code> [start of tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py] 1 #!/usr/bin/env python 2 from __future__ import print_function 3 4 import logging 5 import re 6 import scrapy 7 8 from scrapy import Request 9 from scrapy.linkextractors import IGNORED_EXTENSIONS 10 from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor 11 from scrapy.utils.url import url_has_any_extension 12 13 from typing import Any, Generator, List, Optional, Tuple 14 15 EXCLUDED_URLS = [ 16 # Google calendar returns 404s on HEAD requests unconditionally 17 'https://calendar.google.com/calendar/[email protected]', 18 # Returns 409 errors to HEAD requests frequently 19 'https://medium.freecodecamp.com', 20 ] 21 22 23 class BaseDocumentationSpider(scrapy.Spider): 24 name = None # type: Optional[str] 25 # Exclude domain address. 26 deny_domains = [] # type: List[str] 27 start_urls = [] # type: List[str] 28 deny = [] # type: List[str] 29 file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str] 30 tags = ('a', 'area', 'img') 31 attrs = ('href', 'src') 32 33 def __init__(self, *args, **kwargs): 34 # type: (*Any, **Any) -> None 35 super(BaseDocumentationSpider, self).__init__(*args, **kwargs) 36 self.has_error = False 37 38 def _set_error_state(self): 39 # type: () -> None 40 self.has_error = True 41 42 def _has_extension(self, url): 43 # type: (str) -> bool 44 return url_has_any_extension(url, self.file_extensions) 45 46 def _is_external_url(self, url): 47 # type: (str) -> bool 48 return url.startswith('http') or self._has_extension(url) 49 50 def check_existing(self, response): 51 # type: (Any) -> None 52 self.log(response) 53 54 def check_permalink(self, response): 55 # type: (Any) -> None 56 self.log(response) 57 xpath_template = "//*[@id='{permalink}' or @name='{permalink}']" 58 m = re.match(r".+\#(?P<permalink>.*)$", response.request.url) # Get anchor value. 59 if not m: 60 return 61 permalink = m.group('permalink') 62 # Check permalink existing on response page. 
63 if not response.selector.xpath(xpath_template.format(permalink=permalink)): 64 self._set_error_state() 65 raise Exception( 66 "Permalink #{} is not found on page {}".format(permalink, response.request.url)) 67 68 def parse(self, response): 69 # type: (Any) -> Generator[Request, None, None] 70 self.log(response) 71 for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'], 72 tags=self.tags, attrs=self.attrs, deny=self.deny, 73 canonicalize=False).extract_links(response): 74 callback = self.parse # type: Any 75 dont_filter = False 76 method = 'GET' 77 if self._is_external_url(link.url): 78 callback = self.check_existing 79 method = 'HEAD' 80 elif '#' in link.url: 81 dont_filter = True 82 callback = self.check_permalink 83 yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter, 84 errback=self.error_callback) 85 86 def retry_request_with_get(self, request): 87 # type: (Request) -> Generator[Request, None, None] 88 request.method = 'GET' 89 request.dont_filter = True 90 yield request 91 92 def exclude_error(self, url): 93 # type: (str) -> bool 94 if url in EXCLUDED_URLS: 95 return True 96 return False 97 98 def error_callback(self, failure): 99 # type: (Any) -> Optional[Generator[Any, None, None]] 100 if hasattr(failure.value, 'response') and failure.value.response: 101 response = failure.value.response 102 if self.exclude_error(response.url): 103 return None 104 if response.status == 404: 105 self._set_error_state() 106 raise Exception('Page not found: {}'.format(response)) 107 if response.status == 405 and response.request.method == 'HEAD': 108 # Method 'HEAD' not allowed, repeat request with 'GET' 109 return self.retry_request_with_get(response.request) 110 self.log("Error! Please check link: {}".format(response), logging.ERROR) 111 elif isinstance(failure.type, IOError): 112 self._set_error_state() 113 else: 114 raise Exception(failure.value) 115 return None 116 [end of tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py --- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py +++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py @@ -17,6 +17,8 @@ 'https://calendar.google.com/calendar/[email protected]', # Returns 409 errors to HEAD requests frequently 'https://medium.freecodecamp.com', + # Returns 404 to HEAD requests unconditionally + 'https://www.git-tower.com/blog/command-line-cheat-sheet/', ]
{"golden_diff": "diff --git a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n--- a/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n+++ b/tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py\n@@ -17,6 +17,8 @@\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n+ # Returns 404 to HEAD requests unconditionally\n+ 'https://www.git-tower.com/blog/command-line-cheat-sheet/',\n ]\n", "issue": "Pull GCI docs into main Zulip repo.\nSome of our assets, like git cheatsheets, currently live in the zulip-gci repo. We should move them to the main Zulip repo and link from appropriate places.\r\n\r\ncc @synicalsyntax @lonerz @tommyip \n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport logging\nimport re\nimport scrapy\n\nfrom scrapy import Request\nfrom scrapy.linkextractors import IGNORED_EXTENSIONS\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.utils.url import url_has_any_extension\n\nfrom typing import Any, Generator, List, Optional, Tuple\n\nEXCLUDED_URLS = [\n # Google calendar returns 404s on HEAD requests unconditionally\n 'https://calendar.google.com/calendar/[email protected]',\n # Returns 409 errors to HEAD requests frequently\n 'https://medium.freecodecamp.com',\n]\n\n\nclass BaseDocumentationSpider(scrapy.Spider):\n name = None # type: Optional[str]\n # Exclude domain address.\n deny_domains = [] # type: List[str]\n start_urls = [] # type: List[str]\n deny = [] # type: List[str]\n file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]\n tags = ('a', 'area', 'img')\n attrs = ('href', 'src')\n\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n super(BaseDocumentationSpider, self).__init__(*args, **kwargs)\n self.has_error = False\n\n def _set_error_state(self):\n # type: () -> None\n self.has_error = True\n\n def _has_extension(self, url):\n # type: (str) -> bool\n return url_has_any_extension(url, self.file_extensions)\n\n def _is_external_url(self, url):\n # type: (str) -> bool\n return url.startswith('http') or self._has_extension(url)\n\n def check_existing(self, response):\n # type: (Any) -> None\n self.log(response)\n\n def check_permalink(self, response):\n # type: (Any) -> None\n self.log(response)\n xpath_template = \"//*[@id='{permalink}' or @name='{permalink}']\"\n m = re.match(r\".+\\#(?P<permalink>.*)$\", response.request.url) # Get anchor value.\n if not m:\n return\n permalink = m.group('permalink')\n # Check permalink existing on response page.\n if not response.selector.xpath(xpath_template.format(permalink=permalink)):\n self._set_error_state()\n raise Exception(\n \"Permalink #{} is not found on page {}\".format(permalink, response.request.url))\n\n def parse(self, response):\n # type: (Any) -> Generator[Request, None, None]\n self.log(response)\n for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],\n tags=self.tags, attrs=self.attrs, deny=self.deny,\n canonicalize=False).extract_links(response):\n callback = self.parse # type: Any\n dont_filter = False\n method = 'GET'\n if self._is_external_url(link.url):\n callback = self.check_existing\n method = 'HEAD'\n elif '#' in link.url:\n dont_filter = True\n callback = self.check_permalink\n yield Request(link.url, 
method=method, callback=callback, dont_filter=dont_filter,\n errback=self.error_callback)\n\n def retry_request_with_get(self, request):\n # type: (Request) -> Generator[Request, None, None]\n request.method = 'GET'\n request.dont_filter = True\n yield request\n\n def exclude_error(self, url):\n # type: (str) -> bool\n if url in EXCLUDED_URLS:\n return True\n return False\n\n def error_callback(self, failure):\n # type: (Any) -> Optional[Generator[Any, None, None]]\n if hasattr(failure.value, 'response') and failure.value.response:\n response = failure.value.response\n if self.exclude_error(response.url):\n return None\n if response.status == 404:\n self._set_error_state()\n raise Exception('Page not found: {}'.format(response))\n if response.status == 405 and response.request.method == 'HEAD':\n # Method 'HEAD' not allowed, repeat request with 'GET'\n return self.retry_request_with_get(response.request)\n self.log(\"Error! Please check link: {}\".format(response), logging.ERROR)\n elif isinstance(failure.type, IOError):\n self._set_error_state()\n else:\n raise Exception(failure.value)\n return None\n", "path": "tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py"}]}
1,853
169
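The golden diff here is a one-entry change: a git-tower URL is added to the crawler's `EXCLUDED_URLS` because, per the diff comment, that host answers HEAD requests with 404 unconditionally. A small self-contained sketch of the whitelist check after the change:

```python
# The exclusion list as it reads after the golden diff.
EXCLUDED_URLS = [
    # Google calendar returns 404s on HEAD requests unconditionally
    "https://calendar.google.com/calendar/[email protected]",
    # Returns 409 errors to HEAD requests frequently
    "https://medium.freecodecamp.com",
    # Returns 404 to HEAD requests unconditionally (the entry the diff adds)
    "https://www.git-tower.com/blog/command-line-cheat-sheet/",
]


def exclude_error(url: str) -> bool:
    """Mirror of BaseDocumentationSpider.exclude_error over the updated list."""
    return url in EXCLUDED_URLS


assert exclude_error("https://www.git-tower.com/blog/command-line-cheat-sheet/")
assert not exclude_error("https://www.git-tower.com/blog/")
```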
gh_patches_debug_28640
rasdani/github-patches
git_diff
microsoft__torchgeo-316
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add "Launch in Hub" button to tutorials We currently have a "Open in Colab" button for our tutorials. We should also have one for the Planetary Computer. See https://planetarycomputer.microsoft.com/dataset/landsat-8-c2-l2#Example-Notebook for an example of how this works. </issue> <code> [start of docs/conf.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 # Configuration file for the Sphinx documentation builder. 5 # 6 # This file only contains a selection of the most common options. For a full 7 # list see the documentation: 8 # https://www.sphinx-doc.org/en/master/usage/configuration.html 9 10 # -- Path setup -------------------------------------------------------------- 11 12 import os 13 import sys 14 15 import pytorch_sphinx_theme 16 17 # If extensions (or modules to document with autodoc) are in another directory, 18 # add these directories to sys.path here. If the directory is relative to the 19 # documentation root, use os.path.abspath to make it absolute, like shown here. 20 sys.path.insert(0, os.path.abspath("..")) 21 22 import torchgeo # noqa: E402 23 24 # -- Project information ----------------------------------------------------- 25 26 project = "torchgeo" 27 copyright = "2021, Microsoft Corporation" 28 author = torchgeo.__author__ 29 version = ".".join(torchgeo.__version__.split(".")[:2]) 30 release = torchgeo.__version__ 31 32 33 # -- General configuration --------------------------------------------------- 34 35 # Add any Sphinx extension module names here, as strings. They can be 36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 37 # ones. 38 extensions = [ 39 "sphinx.ext.autodoc", 40 "sphinx.ext.intersphinx", 41 "sphinx.ext.napoleon", 42 "sphinx.ext.todo", 43 "sphinx.ext.viewcode", 44 "nbsphinx", 45 ] 46 47 # List of patterns, relative to source directory, that match files and 48 # directories to ignore when looking for source files. 49 # This pattern also affects html_static_path and html_extra_path. 50 exclude_patterns = ["_build"] 51 52 # Sphinx 3.0+ required for: 53 # autodoc_typehints = "description" 54 needs_sphinx = "3.0" 55 56 nitpicky = True 57 nitpick_ignore = [ 58 # https://github.com/sphinx-doc/sphinx/issues/8127 59 ("py:class", ".."), 60 # TODO: can't figure out why this isn't found 61 ("py:class", "LightningDataModule"), 62 # Undocumented class 63 ("py:class", "torchvision.models.resnet.ResNet"), 64 ] 65 66 67 # -- Options for HTML output ------------------------------------------------- 68 69 # The theme to use for HTML and HTML Help pages. See the documentation for 70 # a list of builtin themes. 71 html_theme = "pytorch_sphinx_theme" 72 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] 73 74 # Theme options are theme-specific and customize the look and feel of a theme 75 # further. For a list of options available for each theme, see the 76 # documentation. 
77 html_theme_options = { 78 "collapse_navigation": False, 79 "display_version": True, 80 "logo_only": True, 81 "pytorch_project": "docs", 82 "navigation_with_keys": True, 83 "analytics_id": "UA-209075005-1", 84 } 85 86 html_favicon = os.path.join("..", "logo", "favicon.ico") 87 88 html_static_path = ["_static"] 89 html_css_files = ["workaround.css"] 90 91 # -- Extension configuration ------------------------------------------------- 92 93 # sphinx.ext.autodoc 94 autodoc_default_options = { 95 "members": True, 96 "special-members": True, 97 "show-inheritance": True, 98 } 99 autodoc_member_order = "bysource" 100 autodoc_typehints = "description" 101 102 # sphinx.ext.intersphinx 103 intersphinx_mapping = { 104 "matplotlib": ("https://matplotlib.org/stable/", None), 105 "python": ("https://docs.python.org/3", None), 106 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None), 107 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None), 108 "rtree": ("https://rtree.readthedocs.io/en/latest/", None), 109 "torch": ("https://pytorch.org/docs/stable", None), 110 "torchvision": ("https://pytorch.org/vision/stable", None), 111 } 112 113 # nbsphinx 114 nbsphinx_execute = "never" 115 # TODO: branch/tag should change depending on which version of docs you look at 116 # TODO: width option of image directive is broken, see: 117 # https://github.com/pytorch/pytorch_sphinx_theme/issues/140 118 nbsphinx_prolog = """ 119 {% set colab = "https://colab.research.google.com" %} 120 {% set repo = "microsoft/torchgeo" %} 121 {% if "dev" in env.config.release %} 122 {% set branch = "main" %} 123 {% else %} 124 {% set branch = "releases/v" ~ env.config.version %} 125 {% endif %} 126 127 .. image:: {{ colab }}/assets/colab-badge.svg 128 :class: colabbadge 129 :alt: Open in Colab 130 :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb 131 """ 132 133 # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme 134 # See more information here https://github.com/spatialaudio/nbsphinx/issues/599 135 # NOTE: This will likely break nbsphinx widgets 136 nbsphinx_requirejs_path = "" 137 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -116,18 +116,35 @@ # TODO: width option of image directive is broken, see: # https://github.com/pytorch/pytorch_sphinx_theme/issues/140 nbsphinx_prolog = """ -{% set colab = "https://colab.research.google.com" %} +{% set host = "https://colab.research.google.com" %} {% set repo = "microsoft/torchgeo" %} +{% set urlpath = "docs/" ~ env.docname ~ ".ipynb" %} {% if "dev" in env.config.release %} {% set branch = "main" %} {% else %} {% set branch = "releases/v" ~ env.config.version %} {% endif %} -.. image:: {{ colab }}/assets/colab-badge.svg +.. image:: {{ host }}/assets/colab-badge.svg :class: colabbadge :alt: Open in Colab - :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb + :target: {{ host }}/github/{{ repo }}/blob/{{ branch }}/{{ urlpath }} + +{% set host = "https://pccompute.westeurope.cloudapp.azure.com" %} +{% set host = host ~ "/compute/hub/user-redirect/git-pull" %} +{% set repo = "https%3A%2F%2Fgithub.com%2Fmicrosoft%2Ftorchgeo" %} +{% set urlpath = "tree%2Ftorchgeo%2Fdocs%2F" %} +{% set urlpath = urlpath ~ env.docname | replace("/", "%2F") ~ ".ipynb" %} +{% if "dev" in env.config.release %} + {% set branch = "main" %} +{% else %} + {% set branch = "releases%2Fv" ~ env.config.version %} +{% endif %} + +.. image:: https://img.shields.io/badge/-Open%20on%20Planetary%20Computer-blue + :class: colabbadge + :alt: Open on Planetary Computer + :target: {{ host }}?repo={{ repo }}&urlpath={{ urlpath }}&branch={{ branch }} """ # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -116,18 +116,35 @@\n # TODO: width option of image directive is broken, see:\n # https://github.com/pytorch/pytorch_sphinx_theme/issues/140\n nbsphinx_prolog = \"\"\"\n-{% set colab = \"https://colab.research.google.com\" %}\n+{% set host = \"https://colab.research.google.com\" %}\n {% set repo = \"microsoft/torchgeo\" %}\n+{% set urlpath = \"docs/\" ~ env.docname ~ \".ipynb\" %}\n {% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n {% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n {% endif %}\n \n-.. image:: {{ colab }}/assets/colab-badge.svg\n+.. image:: {{ host }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n- :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n+ :target: {{ host }}/github/{{ repo }}/blob/{{ branch }}/{{ urlpath }}\n+\n+{% set host = \"https://pccompute.westeurope.cloudapp.azure.com\" %}\n+{% set host = host ~ \"/compute/hub/user-redirect/git-pull\" %}\n+{% set repo = \"https%3A%2F%2Fgithub.com%2Fmicrosoft%2Ftorchgeo\" %}\n+{% set urlpath = \"tree%2Ftorchgeo%2Fdocs%2F\" %}\n+{% set urlpath = urlpath ~ env.docname | replace(\"/\", \"%2F\") ~ \".ipynb\" %}\n+{% if \"dev\" in env.config.release %}\n+ {% set branch = \"main\" %}\n+{% else %}\n+ {% set branch = \"releases%2Fv\" ~ env.config.version %}\n+{% endif %}\n+\n+.. image:: https://img.shields.io/badge/-Open%20on%20Planetary%20Computer-blue\n+ :class: colabbadge\n+ :alt: Open on Planetary Computer\n+ :target: {{ host }}?repo={{ repo }}&urlpath={{ urlpath }}&branch={{ branch }}\n \"\"\"\n \n # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n", "issue": "Add \"Launch in Hub\" button to tutorials\nWe currently have a \"Open in Colab\" button for our tutorials. We should also have one for the Planetary Computer. See https://planetarycomputer.microsoft.com/dataset/landsat-8-c2-l2#Example-Notebook for an example of how this works.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n{% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n{% endif %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}]}
2,042
543
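The nbsphinx change in this last record adds a second badge whose target is a JupyterHub git-pull redirect with percent-encoded `repo`, `urlpath`, and `branch` query parameters. The sketch below assembles an equivalent target URL in plain Python; the notebook name and branch version are examples, not values taken from the diff.

```python
# Sketch of how the "Open on Planetary Computer" badge target is composed.
from urllib.parse import quote, urlencode

HUB = (
    "https://pccompute.westeurope.cloudapp.azure.com"
    "/compute/hub/user-redirect/git-pull"
)
docname = "tutorials/getting_started"  # hypothetical notebook name
branch = "releases/v0.2"               # "main" for dev builds; version is an example

params = {
    "repo": "https://github.com/microsoft/torchgeo",
    "urlpath": f"tree/torchgeo/docs/{docname}.ipynb",
    "branch": branch,
}
# safe="" forces "/" and ":" to be percent-encoded, matching the diff's
# %3A%2F%2F style.
badge_target = HUB + "?" + urlencode(params, quote_via=quote, safe="")
print(badge_target)
```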