problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.71k
9.01k
golden_diff
stringlengths
151
4.94k
verification_info
stringlengths
465
11.3k
num_tokens_prompt
int64
557
2.05k
num_tokens_diff
int64
48
1.02k
gh_patches_debug_32213
rasdani/github-patches
git_diff
mesonbuild__meson-840
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> setup.py: Install scripts without extensions on UNIX-like platforms Because of issue #394 , meson install scripts as `xxx.py` , but in linux, install script with a extension name is no a good practice. And change the installed script name also break some package depend on meson. Could you deal with it for different platform? thx. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 3 # Copyright 2016 The Meson development team 4 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 9 # http://www.apache.org/licenses/LICENSE-2.0 10 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 import sys 18 19 if sys.version_info[0] < 3: 20 print('Tried to install with Python 2, Meson only supports Python 3.') 21 sys.exit(1) 22 23 # We need to support Python installations that have nothing but the basic 24 # Python installation. Use setuptools when possible and fall back to 25 # plain distutils when setuptools is not available. 26 try: 27 from setuptools import setup 28 except ImportError: 29 from distutils.core import setup 30 31 from mesonbuild.coredata import version 32 33 setup(name='meson', 34 version=version, 35 description='A high performance build system', 36 author='Jussi Pakkanen', 37 author_email='[email protected]', 38 url='http://mesonbuild.com', 39 license=' Apache License, Version 2.0', 40 packages=['mesonbuild', 41 'mesonbuild.modules', 42 'mesonbuild.scripts', 43 'mesonbuild.backend', 44 'mesonbuild.wrap'], 45 scripts=['meson.py', 46 'mesonconf.py', 47 'mesonintrospect.py', 48 'wraptool.py'], 49 data_files=[('share/man/man1', ['man/meson.1', 50 'man/mesonconf.1', 51 'man/mesonintrospect.1', 52 'man/wraptool.1'])], 53 classifiers=['Development Status :: 5 - Production/Stable', 54 'Environment :: Console', 55 'Intended Audience :: Developers', 56 'License :: OSI Approved :: Apache Software License', 57 'Natural Language :: English', 58 'Operating System :: MacOS :: MacOS X', 59 'Operating System :: Microsoft :: Windows', 60 'Operating System :: POSIX :: BSD', 61 'Operating System :: POSIX :: Linux', 62 'Programming Language :: Python :: 3 :: Only', 63 'Topic :: Software Development :: Build Tools', 64 ], 65 long_description='''Meson is a cross-platform build system designed to be both as 66 fast and as user friendly as possible. It supports many languages and compilers, including 67 GCC, Clang and Visual Studio. Its build definitions are written in a simple non-turing 68 complete DSL.''') 69 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -14,7 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import sys +from os import path if sys.version_info[0] < 3: print('Tried to install with Python 2, Meson only supports Python 3.') @@ -25,8 +27,32 @@ # plain distutils when setuptools is not available. try: from setuptools import setup + from setuptools.command.install_scripts import install_scripts as orig except ImportError: from distutils.core import setup + from distutils.command.install_scripts import install_scripts as orig + +from distutils.file_util import copy_file +from distutils.dir_util import mkpath +from stat import ST_MODE + +class install_scripts(orig): + def run(self): + if sys.platform == 'win32': + super().run() + return + + self.outfiles = [] + if not self.dry_run: + mkpath(self.install_dir) + + # We want the files to be installed without a suffix on Unix + for infile in self.get_inputs(): + in_stripped = infile[:-3] if infile.endswith('.py') else infile + outfile = path.join(self.install_dir, in_stripped) + # NOTE: Mode is preserved by default + copy_file(infile, outfile, dry_run=self.dry_run) + self.outfiles.append(outfile) from mesonbuild.coredata import version @@ -46,6 +72,7 @@ 'mesonconf.py', 'mesonintrospect.py', 'wraptool.py'], + cmdclass={'install_scripts': install_scripts}, data_files=[('share/man/man1', ['man/meson.1', 'man/mesonconf.1', 'man/mesonintrospect.1',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,9 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import os\n import sys\n+from os import path\n \n if sys.version_info[0] < 3:\n print('Tried to install with Python 2, Meson only supports Python 3.')\n@@ -25,8 +27,32 @@\n # plain distutils when setuptools is not available.\n try:\n from setuptools import setup\n+ from setuptools.command.install_scripts import install_scripts as orig\n except ImportError:\n from distutils.core import setup\n+ from distutils.command.install_scripts import install_scripts as orig\n+\n+from distutils.file_util import copy_file\n+from distutils.dir_util import mkpath\n+from stat import ST_MODE\n+\n+class install_scripts(orig):\n+ def run(self):\n+ if sys.platform == 'win32':\n+ super().run()\n+ return\n+\n+ self.outfiles = []\n+ if not self.dry_run:\n+ mkpath(self.install_dir)\n+\n+ # We want the files to be installed without a suffix on Unix\n+ for infile in self.get_inputs():\n+ in_stripped = infile[:-3] if infile.endswith('.py') else infile\n+ outfile = path.join(self.install_dir, in_stripped)\n+ # NOTE: Mode is preserved by default\n+ copy_file(infile, outfile, dry_run=self.dry_run)\n+ self.outfiles.append(outfile)\n \n from mesonbuild.coredata import version\n \n@@ -46,6 +72,7 @@\n 'mesonconf.py',\n 'mesonintrospect.py',\n 'wraptool.py'],\n+ cmdclass={'install_scripts': install_scripts},\n data_files=[('share/man/man1', ['man/meson.1',\n 'man/mesonconf.1',\n 'man/mesonintrospect.1',\n", "issue": "setup.py: Install scripts without extensions on UNIX-like platforms\nBecause of issue #394 , meson install scripts as `xxx.py` , but in linux, install script with a extension name is no a good practice. And change the installed script name also break some package depend on meson.\n\nCould you deal with it for different platform?\n\nthx.\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nif sys.version_info[0] < 3:\n print('Tried to install with Python 2, Meson only supports Python 3.')\n sys.exit(1)\n\n# We need to support Python installations that have nothing but the basic\n# Python installation. Use setuptools when possible and fall back to\n# plain distutils when setuptools is not available.\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nfrom mesonbuild.coredata import version\n\nsetup(name='meson',\n version=version,\n description='A high performance build system',\n author='Jussi Pakkanen',\n author_email='[email protected]',\n url='http://mesonbuild.com',\n license=' Apache License, Version 2.0',\n packages=['mesonbuild',\n 'mesonbuild.modules',\n 'mesonbuild.scripts',\n 'mesonbuild.backend',\n 'mesonbuild.wrap'],\n scripts=['meson.py',\n 'mesonconf.py',\n 'mesonintrospect.py',\n 'wraptool.py'],\n data_files=[('share/man/man1', ['man/meson.1',\n 'man/mesonconf.1',\n 'man/mesonintrospect.1',\n 'man/wraptool.1'])],\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Build Tools',\n ],\n long_description='''Meson is a cross-platform build system designed to be both as\nfast and as user friendly as possible. It supports many languages and compilers, including\nGCC, Clang and Visual Studio. Its build definitions are written in a simple non-turing\ncomplete DSL.''')\n", "path": "setup.py"}]}
1,322
437
gh_patches_debug_16211
rasdani/github-patches
git_diff
google__jax-326
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> jax missing scipy.special.expit Would be possible to add gradients for `expit` and `logit`? </issue> <code> [start of jax/scipy/special.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 from __future__ import division 17 from __future__ import print_function 18 19 import scipy.special as osp_special 20 21 from .. import lax 22 from ..numpy.lax_numpy import _wraps 23 24 25 # need to create new functions because _wraps sets the __name__ attribute 26 gammaln = _wraps(osp_special.gammaln)(lambda x: lax.lgamma(x)) 27 digamma = _wraps(osp_special.digamma)(lambda x: lax.digamma(x)) 28 erf = _wraps(osp_special.erf)(lambda x: lax.erf(x)) 29 erfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x)) 30 erfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x)) 31 [end of jax/scipy/special.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/jax/scipy/special.py b/jax/scipy/special.py --- a/jax/scipy/special.py +++ b/jax/scipy/special.py @@ -19,7 +19,7 @@ import scipy.special as osp_special from .. import lax -from ..numpy.lax_numpy import _wraps +from ..numpy.lax_numpy import _wraps, asarray # need to create new functions because _wraps sets the __name__ attribute @@ -28,3 +28,16 @@ erf = _wraps(osp_special.erf)(lambda x: lax.erf(x)) erfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x)) erfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x)) + + +@_wraps(osp_special.logit) +def logit(x): + x = asarray(x) + return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x))) + + +@_wraps(osp_special.expit) +def expit(x): + x = asarray(x) + one = lax._const(x, 1) + return lax.div(one, lax.add(one, lax.exp(lax.neg(x))))
{"golden_diff": "diff --git a/jax/scipy/special.py b/jax/scipy/special.py\n--- a/jax/scipy/special.py\n+++ b/jax/scipy/special.py\n@@ -19,7 +19,7 @@\n import scipy.special as osp_special\n \n from .. import lax\n-from ..numpy.lax_numpy import _wraps\n+from ..numpy.lax_numpy import _wraps, asarray\n \n \n # need to create new functions because _wraps sets the __name__ attribute\n@@ -28,3 +28,16 @@\n erf = _wraps(osp_special.erf)(lambda x: lax.erf(x))\n erfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x))\n erfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x))\n+\n+\n+@_wraps(osp_special.logit)\n+def logit(x):\n+ x = asarray(x)\n+ return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x)))\n+\n+\n+@_wraps(osp_special.expit)\n+def expit(x):\n+ x = asarray(x)\n+ one = lax._const(x, 1)\n+ return lax.div(one, lax.add(one, lax.exp(lax.neg(x))))\n", "issue": "jax missing scipy.special.expit\nWould be possible to add gradients for `expit` and `logit`?\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport scipy.special as osp_special\n\nfrom .. import lax\nfrom ..numpy.lax_numpy import _wraps\n\n\n# need to create new functions because _wraps sets the __name__ attribute\ngammaln = _wraps(osp_special.gammaln)(lambda x: lax.lgamma(x))\ndigamma = _wraps(osp_special.digamma)(lambda x: lax.digamma(x))\nerf = _wraps(osp_special.erf)(lambda x: lax.erf(x))\nerfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x))\nerfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x))\n", "path": "jax/scipy/special.py"}]}
922
291
gh_patches_debug_516
rasdani/github-patches
git_diff
meltano__meltano-7210
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> feature: Python 3.11 support ### Feature scope Other ### Description Python 3.11.0 is planned to be officially released as of 2022-10-24. We should add it to our test matrix, and build Docker images for it for each release. </issue> <code> [start of noxfile.py] 1 """Nox configuration.""" 2 3 from __future__ import annotations 4 5 import os 6 import sys 7 from pathlib import Path 8 from random import randint 9 from textwrap import dedent 10 11 try: 12 from nox_poetry import Session 13 from nox_poetry import session as nox_session 14 except ImportError: 15 message = f"""\ 16 Nox failed to import the 'nox-poetry' package. 17 Please install it using the following command: 18 {sys.executable} -m pip install nox-poetry""" 19 raise SystemExit(dedent(message)) from None 20 21 22 package = "meltano" 23 python_versions = ["3.10", "3.9", "3.8", "3.7"] 24 main_python_version = "3.9" 25 locations = "src", "tests", "noxfile.py" 26 27 28 @nox_session(python=python_versions) 29 def tests(session: Session) -> None: 30 """Execute pytest tests and compute coverage. 31 32 Args: 33 session: Nox session. 34 """ 35 backend_db = os.environ.get("PYTEST_BACKEND", "sqlite") 36 37 if backend_db == "mssql": 38 session.install(".[mssql,azure,gcs,s3]") 39 40 else: 41 session.install(".[azure,gcs,s3]") 42 43 session.install( 44 "colorama", # colored output in Windows 45 "freezegun", 46 "mock", 47 "pytest", 48 "pytest-asyncio", 49 "pytest-cov", 50 "pytest-docker", 51 "pytest-order", 52 "pytest-randomly", 53 "pytest-xdist", 54 "requests-mock", 55 ) 56 57 try: 58 session.run( 59 "pytest", 60 f"--randomly-seed={randint(0, 2**32-1)}", # noqa: S311, WPS432 61 *session.posargs, 62 env={"NOX_CURRENT_SESSION": "tests"}, 63 ) 64 finally: 65 if session.interactive: 66 session.notify("coverage", posargs=[]) 67 68 69 @nox_session(python=main_python_version) 70 def coverage(session: Session) -> None: 71 """Upload coverage data. 72 73 Args: 74 session: Nox session. 75 """ 76 args = session.posargs or ["report"] 77 78 session.install("coverage[toml]") 79 80 if not session.posargs and any(Path().glob(".coverage.*")): 81 session.run("coverage", "combine") 82 83 session.run("coverage", *args) 84 85 86 @nox_session(python=main_python_version) 87 def mypy(session: Session) -> None: 88 """Run mypy type checking. 89 90 Args: 91 session: Nox session. 92 """ 93 args = session.posargs or ["src/meltano", "--exclude", "src/meltano/migrations/"] 94 95 session.install(".") 96 session.install( 97 "mypy", 98 "sqlalchemy2-stubs", 99 "types-croniter", 100 "types-psutil", 101 "types-requests", 102 "boto3-stubs[essential]", 103 ) 104 session.run("mypy", *args) 105 [end of noxfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/noxfile.py b/noxfile.py --- a/noxfile.py +++ b/noxfile.py @@ -20,7 +20,7 @@ package = "meltano" -python_versions = ["3.10", "3.9", "3.8", "3.7"] +python_versions = ["3.11", "3.10", "3.9", "3.8", "3.7"] main_python_version = "3.9" locations = "src", "tests", "noxfile.py"
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -20,7 +20,7 @@\n \n \n package = \"meltano\"\n-python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"]\n+python_versions = [\"3.11\", \"3.10\", \"3.9\", \"3.8\", \"3.7\"]\n main_python_version = \"3.9\"\n locations = \"src\", \"tests\", \"noxfile.py\"\n", "issue": "feature: Python 3.11 support\n### Feature scope\n\nOther\n\n### Description\n\nPython 3.11.0 is planned to be officially released as of 2022-10-24. We should add it to our test matrix, and build Docker images for it for each release.\n", "before_files": [{"content": "\"\"\"Nox configuration.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom pathlib import Path\nfrom random import randint\nfrom textwrap import dedent\n\ntry:\n from nox_poetry import Session\n from nox_poetry import session as nox_session\nexcept ImportError:\n message = f\"\"\"\\\n Nox failed to import the 'nox-poetry' package.\n Please install it using the following command:\n {sys.executable} -m pip install nox-poetry\"\"\"\n raise SystemExit(dedent(message)) from None\n\n\npackage = \"meltano\"\npython_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"]\nmain_python_version = \"3.9\"\nlocations = \"src\", \"tests\", \"noxfile.py\"\n\n\n@nox_session(python=python_versions)\ndef tests(session: Session) -> None:\n \"\"\"Execute pytest tests and compute coverage.\n\n Args:\n session: Nox session.\n \"\"\"\n backend_db = os.environ.get(\"PYTEST_BACKEND\", \"sqlite\")\n\n if backend_db == \"mssql\":\n session.install(\".[mssql,azure,gcs,s3]\")\n\n else:\n session.install(\".[azure,gcs,s3]\")\n\n session.install(\n \"colorama\", # colored output in Windows\n \"freezegun\",\n \"mock\",\n \"pytest\",\n \"pytest-asyncio\",\n \"pytest-cov\",\n \"pytest-docker\",\n \"pytest-order\",\n \"pytest-randomly\",\n \"pytest-xdist\",\n \"requests-mock\",\n )\n\n try:\n session.run(\n \"pytest\",\n f\"--randomly-seed={randint(0, 2**32-1)}\", # noqa: S311, WPS432\n *session.posargs,\n env={\"NOX_CURRENT_SESSION\": \"tests\"},\n )\n finally:\n if session.interactive:\n session.notify(\"coverage\", posargs=[])\n\n\n@nox_session(python=main_python_version)\ndef coverage(session: Session) -> None:\n \"\"\"Upload coverage data.\n\n Args:\n session: Nox session.\n \"\"\"\n args = session.posargs or [\"report\"]\n\n session.install(\"coverage[toml]\")\n\n if not session.posargs and any(Path().glob(\".coverage.*\")):\n session.run(\"coverage\", \"combine\")\n\n session.run(\"coverage\", *args)\n\n\n@nox_session(python=main_python_version)\ndef mypy(session: Session) -> None:\n \"\"\"Run mypy type checking.\n\n Args:\n session: Nox session.\n \"\"\"\n args = session.posargs or [\"src/meltano\", \"--exclude\", \"src/meltano/migrations/\"]\n\n session.install(\".\")\n session.install(\n \"mypy\",\n \"sqlalchemy2-stubs\",\n \"types-croniter\",\n \"types-psutil\",\n \"types-requests\",\n \"boto3-stubs[essential]\",\n )\n session.run(\"mypy\", *args)\n", "path": "noxfile.py"}]}
1,470
126
gh_patches_debug_11609
rasdani/github-patches
git_diff
google-research__text-to-text-transfer-transformer-39
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Do we support GPU distributed training? Hi, thanks for the awesome project! Does the code base support distributed training? If not, is it possible to support it after some code modifications? By the way, what is the way to set batch size and gpu number if I want to use GPU to train the model? Thank you for your kind attention. </issue> <code> [start of setup.py] 1 # Copyright 2019 The T5 Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Install T5.""" 16 17 import setuptools 18 19 # Get the long description from the README file. 20 with open('README.md') as fp: 21 _LONG_DESCRIPTION = fp.read() 22 23 setuptools.setup( 24 name='t5', 25 version='0.1.7', 26 description='Text-to-text transfer transformer', 27 long_description=_LONG_DESCRIPTION, 28 long_description_content_type='text/markdown', 29 author='Google Inc.', 30 author_email='[email protected]', 31 url='http://github.com/google-research/text-to-text-transfer-transformer', 32 license='Apache 2.0', 33 packages=setuptools.find_packages(), 34 package_data={ 35 '': ['*.gin'], 36 }, 37 scripts=[], 38 install_requires=[ 39 'absl-py', 40 'allennlp', 41 'babel', 42 'future', 43 'gin-config', 44 'mesh-tensorflow[transformer]>=0.1.8', 45 'nltk', 46 'numpy', 47 'pandas', 48 'rouge-score', 49 'sacrebleu', 50 'scikit-learn', 51 'scipy', 52 'sentencepiece', 53 'six', 54 'tensorflow-datasets>=1.3.2', 55 'tensorflow-text==1.15.0rc0', 56 ], 57 extras_require={ 58 'tensorflow': ['tensorflow==1.15'], 59 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine', 60 'google-cloud-storage', 'oauth2client'], 61 }, 62 entry_points={ 63 'console_scripts': [ 64 't5_mesh_transformer = ' 65 't5.models.mesh_transformer_main:console_entry_point', 66 ], 67 }, 68 classifiers=[ 69 'Development Status :: 4 - Beta', 70 'Intended Audience :: Developers', 71 'Intended Audience :: Science/Research', 72 'License :: OSI Approved :: Apache Software License', 73 'Topic :: Scientific/Engineering :: Artificial Intelligence', 74 ], 75 keywords='text nlp machinelearning', 76 ) 77 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ setuptools.setup( name='t5', - version='0.1.7', + version='0.1.8', description='Text-to-text transfer transformer', long_description=_LONG_DESCRIPTION, long_description_content_type='text/markdown', @@ -41,7 +41,7 @@ 'babel', 'future', 'gin-config', - 'mesh-tensorflow[transformer]>=0.1.8', + 'mesh-tensorflow[transformer]>=0.1.9', 'nltk', 'numpy', 'pandas',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \n setuptools.setup(\n name='t5',\n- version='0.1.7',\n+ version='0.1.8',\n description='Text-to-text transfer transformer',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n@@ -41,7 +41,7 @@\n 'babel',\n 'future',\n 'gin-config',\n- 'mesh-tensorflow[transformer]>=0.1.8',\n+ 'mesh-tensorflow[transformer]>=0.1.9',\n 'nltk',\n 'numpy',\n 'pandas',\n", "issue": "Do we support GPU distributed training?\nHi, thanks for the awesome project!\r\n\r\nDoes the code base support distributed training? If not, is it possible to support it after some code modifications?\r\n\r\nBy the way, what is the way to set batch size and gpu number if I want to use GPU to train the model?\r\n\r\nThank you for your kind attention.\n", "before_files": [{"content": "# Copyright 2019 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5.\"\"\"\n\nimport setuptools\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\nsetuptools.setup(\n name='t5',\n version='0.1.7',\n description='Text-to-text transfer transformer',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/text-to-text-transfer-transformer',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['*.gin'],\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'allennlp',\n 'babel',\n 'future',\n 'gin-config',\n 'mesh-tensorflow[transformer]>=0.1.8',\n 'nltk',\n 'numpy',\n 'pandas',\n 'rouge-score',\n 'sacrebleu',\n 'scikit-learn',\n 'scipy',\n 'sentencepiece',\n 'six',\n 'tensorflow-datasets>=1.3.2',\n 'tensorflow-text==1.15.0rc0',\n ],\n extras_require={\n 'tensorflow': ['tensorflow==1.15'],\n 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'],\n },\n entry_points={\n 'console_scripts': [\n 't5_mesh_transformer = '\n 't5.models.mesh_transformer_main:console_entry_point',\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n", "path": "setup.py"}]}
1,303
162
gh_patches_debug_43005
rasdani/github-patches
git_diff
deepset-ai__haystack-6304
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `HTMLToDocument` to add `ByteStream` metadata to Document `HTMLToDocument` converter, when receiving a `ByteStream` from the `LinkContentFetcher` does not add the url to the metadata of the Document. The URL is in the metadata of the ByteStream </issue> <code> [start of haystack/preview/components/file_converters/html.py] 1 import logging 2 from typing import List, Union 3 from pathlib import Path 4 5 from haystack.preview import Document, component 6 from haystack.preview.dataclasses import ByteStream 7 from haystack.preview.lazy_imports import LazyImport 8 9 logger = logging.getLogger(__name__) 10 11 with LazyImport("Run 'pip install boilerpy3'") as boilerpy3_import: 12 from boilerpy3 import extractors 13 14 15 @component 16 class HTMLToDocument: 17 """ 18 Converts an HTML file to a Document. 19 """ 20 21 def __init__(self): 22 """ 23 Initializes the HTMLToDocument component. 24 """ 25 boilerpy3_import.check() 26 27 @component.output_types(documents=List[Document]) 28 def run(self, sources: List[Union[str, Path, ByteStream]]): 29 """ 30 Converts a list of HTML files to Documents. 31 32 :param sources: List of HTML file paths or ByteStream objects. 33 :return: List of converted Documents. 34 """ 35 documents = [] 36 extractor = extractors.ArticleExtractor(raise_on_failure=False) 37 for source in sources: 38 try: 39 file_content = self._extract_content(source) 40 except Exception as e: 41 logger.warning("Could not read %s. Skipping it. Error: %s", source, e) 42 continue 43 try: 44 text = extractor.get_content(file_content) 45 except Exception as conversion_e: # Consider specifying the expected exception type(s) here 46 logger.warning("Failed to extract text from %s. Skipping it. Error: %s", source, conversion_e) 47 continue 48 49 document = Document(content=text) 50 documents.append(document) 51 52 return {"documents": documents} 53 54 def _extract_content(self, source: Union[str, Path, ByteStream]) -> str: 55 """ 56 Extracts content from the given data source 57 :param source: The data source to extract content from. 58 :return: The extracted content. 59 """ 60 if isinstance(source, (str, Path)): 61 with open(source) as text_file: 62 return text_file.read() 63 if isinstance(source, ByteStream): 64 return source.data.decode("utf-8") 65 66 raise ValueError(f"Unsupported source type: {type(source)}") 67 [end of haystack/preview/components/file_converters/html.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/haystack/preview/components/file_converters/html.py b/haystack/preview/components/file_converters/html.py --- a/haystack/preview/components/file_converters/html.py +++ b/haystack/preview/components/file_converters/html.py @@ -1,6 +1,6 @@ import logging -from typing import List, Union from pathlib import Path +from typing import Any, Dict, List, Optional, Union from haystack.preview import Document, component from haystack.preview.dataclasses import ByteStream @@ -16,6 +16,18 @@ class HTMLToDocument: """ Converts an HTML file to a Document. + + Usage example: + ```python + from haystack.preview.components.file_converters.html import HTMLToDocument + + converter = HTMLToDocument() + results = converter.run(sources=["sample.html"]) + documents = results["documents"] + print(documents[0].content) + # 'This is a text from the HTML file.' + ``` + """ def __init__(self): @@ -25,18 +37,30 @@ boilerpy3_import.check() @component.output_types(documents=List[Document]) - def run(self, sources: List[Union[str, Path, ByteStream]]): + def run(self, sources: List[Union[str, Path, ByteStream]], meta: Optional[List[Dict[str, Any]]] = None): """ Converts a list of HTML files to Documents. :param sources: List of HTML file paths or ByteStream objects. + :param meta: Optional list of metadata to attach to the Documents. + The length of the list must match the number of sources. Defaults to `None`. :return: List of converted Documents. """ + documents = [] + + # Create metadata placeholders if not provided + if meta: + if len(sources) != len(meta): + raise ValueError("The length of the metadata list must match the number of sources.") + else: + meta = [{}] * len(sources) + extractor = extractors.ArticleExtractor(raise_on_failure=False) - for source in sources: + + for source, metadata in zip(sources, meta): try: - file_content = self._extract_content(source) + file_content, extracted_meta = self._extract_content(source) except Exception as e: logger.warning("Could not read %s. Skipping it. Error: %s", source, e) continue @@ -46,21 +70,25 @@ logger.warning("Failed to extract text from %s. Skipping it. Error: %s", source, conversion_e) continue - document = Document(content=text) + # Merge metadata received from ByteStream with supplied metadata + if extracted_meta: + # Supplied metadata overwrites metadata from ByteStream for overlapping keys. + metadata = {**extracted_meta, **metadata} + document = Document(content=text, meta=metadata) documents.append(document) return {"documents": documents} - def _extract_content(self, source: Union[str, Path, ByteStream]) -> str: + def _extract_content(self, source: Union[str, Path, ByteStream]) -> tuple: """ Extracts content from the given data source :param source: The data source to extract content from. - :return: The extracted content. + :return: The extracted content and metadata. """ if isinstance(source, (str, Path)): with open(source) as text_file: - return text_file.read() + return (text_file.read(), None) if isinstance(source, ByteStream): - return source.data.decode("utf-8") + return (source.data.decode("utf-8"), source.metadata) raise ValueError(f"Unsupported source type: {type(source)}")
{"golden_diff": "diff --git a/haystack/preview/components/file_converters/html.py b/haystack/preview/components/file_converters/html.py\n--- a/haystack/preview/components/file_converters/html.py\n+++ b/haystack/preview/components/file_converters/html.py\n@@ -1,6 +1,6 @@\n import logging\n-from typing import List, Union\n from pathlib import Path\n+from typing import Any, Dict, List, Optional, Union\n \n from haystack.preview import Document, component\n from haystack.preview.dataclasses import ByteStream\n@@ -16,6 +16,18 @@\n class HTMLToDocument:\n \"\"\"\n Converts an HTML file to a Document.\n+\n+ Usage example:\n+ ```python\n+ from haystack.preview.components.file_converters.html import HTMLToDocument\n+\n+ converter = HTMLToDocument()\n+ results = converter.run(sources=[\"sample.html\"])\n+ documents = results[\"documents\"]\n+ print(documents[0].content)\n+ # 'This is a text from the HTML file.'\n+ ```\n+\n \"\"\"\n \n def __init__(self):\n@@ -25,18 +37,30 @@\n boilerpy3_import.check()\n \n @component.output_types(documents=List[Document])\n- def run(self, sources: List[Union[str, Path, ByteStream]]):\n+ def run(self, sources: List[Union[str, Path, ByteStream]], meta: Optional[List[Dict[str, Any]]] = None):\n \"\"\"\n Converts a list of HTML files to Documents.\n \n :param sources: List of HTML file paths or ByteStream objects.\n+ :param meta: Optional list of metadata to attach to the Documents.\n+ The length of the list must match the number of sources. Defaults to `None`.\n :return: List of converted Documents.\n \"\"\"\n+\n documents = []\n+\n+ # Create metadata placeholders if not provided\n+ if meta:\n+ if len(sources) != len(meta):\n+ raise ValueError(\"The length of the metadata list must match the number of sources.\")\n+ else:\n+ meta = [{}] * len(sources)\n+\n extractor = extractors.ArticleExtractor(raise_on_failure=False)\n- for source in sources:\n+\n+ for source, metadata in zip(sources, meta):\n try:\n- file_content = self._extract_content(source)\n+ file_content, extracted_meta = self._extract_content(source)\n except Exception as e:\n logger.warning(\"Could not read %s. Skipping it. Error: %s\", source, e)\n continue\n@@ -46,21 +70,25 @@\n logger.warning(\"Failed to extract text from %s. Skipping it. Error: %s\", source, conversion_e)\n continue\n \n- document = Document(content=text)\n+ # Merge metadata received from ByteStream with supplied metadata\n+ if extracted_meta:\n+ # Supplied metadata overwrites metadata from ByteStream for overlapping keys.\n+ metadata = {**extracted_meta, **metadata}\n+ document = Document(content=text, meta=metadata)\n documents.append(document)\n \n return {\"documents\": documents}\n \n- def _extract_content(self, source: Union[str, Path, ByteStream]) -> str:\n+ def _extract_content(self, source: Union[str, Path, ByteStream]) -> tuple:\n \"\"\"\n Extracts content from the given data source\n :param source: The data source to extract content from.\n- :return: The extracted content.\n+ :return: The extracted content and metadata.\n \"\"\"\n if isinstance(source, (str, Path)):\n with open(source) as text_file:\n- return text_file.read()\n+ return (text_file.read(), None)\n if isinstance(source, ByteStream):\n- return source.data.decode(\"utf-8\")\n+ return (source.data.decode(\"utf-8\"), source.metadata)\n \n raise ValueError(f\"Unsupported source type: {type(source)}\")\n", "issue": "`HTMLToDocument` to add `ByteStream` metadata to Document \n`HTMLToDocument` converter, when receiving a `ByteStream` from the `LinkContentFetcher` does not add the url to the metadata of the Document. The URL is in the metadata of the ByteStream\r\n\n", "before_files": [{"content": "import logging\nfrom typing import List, Union\nfrom pathlib import Path\n\nfrom haystack.preview import Document, component\nfrom haystack.preview.dataclasses import ByteStream\nfrom haystack.preview.lazy_imports import LazyImport\n\nlogger = logging.getLogger(__name__)\n\nwith LazyImport(\"Run 'pip install boilerpy3'\") as boilerpy3_import:\n from boilerpy3 import extractors\n\n\n@component\nclass HTMLToDocument:\n \"\"\"\n Converts an HTML file to a Document.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes the HTMLToDocument component.\n \"\"\"\n boilerpy3_import.check()\n\n @component.output_types(documents=List[Document])\n def run(self, sources: List[Union[str, Path, ByteStream]]):\n \"\"\"\n Converts a list of HTML files to Documents.\n\n :param sources: List of HTML file paths or ByteStream objects.\n :return: List of converted Documents.\n \"\"\"\n documents = []\n extractor = extractors.ArticleExtractor(raise_on_failure=False)\n for source in sources:\n try:\n file_content = self._extract_content(source)\n except Exception as e:\n logger.warning(\"Could not read %s. Skipping it. Error: %s\", source, e)\n continue\n try:\n text = extractor.get_content(file_content)\n except Exception as conversion_e: # Consider specifying the expected exception type(s) here\n logger.warning(\"Failed to extract text from %s. Skipping it. Error: %s\", source, conversion_e)\n continue\n\n document = Document(content=text)\n documents.append(document)\n\n return {\"documents\": documents}\n\n def _extract_content(self, source: Union[str, Path, ByteStream]) -> str:\n \"\"\"\n Extracts content from the given data source\n :param source: The data source to extract content from.\n :return: The extracted content.\n \"\"\"\n if isinstance(source, (str, Path)):\n with open(source) as text_file:\n return text_file.read()\n if isinstance(source, ByteStream):\n return source.data.decode(\"utf-8\")\n\n raise ValueError(f\"Unsupported source type: {type(source)}\")\n", "path": "haystack/preview/components/file_converters/html.py"}]}
1,190
850
gh_patches_debug_23254
rasdani/github-patches
git_diff
facebookresearch__ParlAI-1939
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'dict' object has no attribute 'force_set' **Bug description** When I try the pretrained model of Self-feeding Chatbot, by `python projects/self_feeding/interactive.py --model-file zoo:self_feeding/hh131k_hb60k_fb60k_st1k/model --no-cuda`, error occurs: AttributeError: 'dict' object has no attribute 'force_set' **Logs** Please paste the command line output: ``` Enter Your Message: hello Traceback (most recent call last): File "projects/self_feeding/interactive.py", line 87, in <module> interactive(parser.parse_args(print_args=False), print_parser=parser) File "projects/self_feeding/interactive.py", line 78, in interactive world.parley() File "/home/han/Github/ParlAI/parlai/core/worlds.py", line 273, in parley agents[1].observe(validate(acts[0])) File "/home/han/Github/ParlAI/projects/self_feeding/self_feeding_agent.py", line 370, in observe observation.force_set( AttributeError: 'dict' object has no attribute 'force_set' ``` **Additional context** Add any other context about the problem here. (like proxy settings, network setup, overall goals, etc.) </issue> <code> [start of parlai/agents/local_human/local_human.py] 1 #!/usr/bin/env python3 2 3 # Copyright (c) Facebook, Inc. and its affiliates. 4 # This source code is licensed under the MIT license found in the 5 # LICENSE file in the root directory of this source tree. 6 """Agent does gets the local keyboard input in the act() function. 7 Example: python examples/eval_model.py -m local_human -t babi:Task1k:1 -dt valid 8 """ 9 10 from parlai.core.agents import Agent 11 from parlai.core.utils import display_messages, load_cands 12 13 14 class LocalHumanAgent(Agent): 15 def add_cmdline_args(argparser): 16 """Add command-line arguments specifically for this agent.""" 17 agent = argparser.add_argument_group('Local Human Arguments') 18 agent.add_argument( 19 '-fixedCands', 20 '--local-human-candidates-file', 21 default=None, 22 type=str, 23 help='File of label_candidates to send to other agent', 24 ) 25 agent.add_argument( 26 '--single_turn', 27 type='bool', 28 default=False, 29 help='If on, assumes single turn episodes.', 30 ) 31 32 def __init__(self, opt, shared=None): 33 super().__init__(opt) 34 self.id = 'localHuman' 35 self.episodeDone = False 36 self.fixedCands_txt = load_cands(self.opt.get('local_human_candidates_file')) 37 print("Enter [DONE] if you want to end the episode.\n") 38 39 def observe(self, msg): 40 print( 41 display_messages( 42 [msg], 43 ignore_fields=self.opt.get('display_ignore_fields', ''), 44 prettify=self.opt.get('display_prettify', False), 45 ) 46 ) 47 48 def act(self): 49 reply = {} 50 reply['id'] = self.getID() 51 reply_text = input("Enter Your Message: ") 52 reply_text = reply_text.replace('\\n', '\n') 53 if self.opt.get('single_turn', False): 54 reply_text += '[DONE]' 55 reply['episode_done'] = False 56 reply['label_candidates'] = self.fixedCands_txt 57 if '[DONE]' in reply_text: 58 reply['episode_done'] = True 59 self.episodeDone = True 60 reply_text = reply_text.replace('[DONE]', '') 61 reply['text'] = reply_text 62 return reply 63 64 def episode_done(self): 65 return self.episodeDone 66 [end of parlai/agents/local_human/local_human.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parlai/agents/local_human/local_human.py b/parlai/agents/local_human/local_human.py --- a/parlai/agents/local_human/local_human.py +++ b/parlai/agents/local_human/local_human.py @@ -8,6 +8,7 @@ """ from parlai.core.agents import Agent +from parlai.core.message import Message from parlai.core.utils import display_messages, load_cands @@ -46,7 +47,7 @@ ) def act(self): - reply = {} + reply = Message() reply['id'] = self.getID() reply_text = input("Enter Your Message: ") reply_text = reply_text.replace('\\n', '\n') @@ -55,7 +56,7 @@ reply['episode_done'] = False reply['label_candidates'] = self.fixedCands_txt if '[DONE]' in reply_text: - reply['episode_done'] = True + reply.force_set('episode_done', True) self.episodeDone = True reply_text = reply_text.replace('[DONE]', '') reply['text'] = reply_text
{"golden_diff": "diff --git a/parlai/agents/local_human/local_human.py b/parlai/agents/local_human/local_human.py\n--- a/parlai/agents/local_human/local_human.py\n+++ b/parlai/agents/local_human/local_human.py\n@@ -8,6 +8,7 @@\n \"\"\"\n \n from parlai.core.agents import Agent\n+from parlai.core.message import Message\n from parlai.core.utils import display_messages, load_cands\n \n \n@@ -46,7 +47,7 @@\n )\n \n def act(self):\n- reply = {}\n+ reply = Message()\n reply['id'] = self.getID()\n reply_text = input(\"Enter Your Message: \")\n reply_text = reply_text.replace('\\\\n', '\\n')\n@@ -55,7 +56,7 @@\n reply['episode_done'] = False\n reply['label_candidates'] = self.fixedCands_txt\n if '[DONE]' in reply_text:\n- reply['episode_done'] = True\n+ reply.force_set('episode_done', True)\n self.episodeDone = True\n reply_text = reply_text.replace('[DONE]', '')\n reply['text'] = reply_text\n", "issue": "AttributeError: 'dict' object has no attribute 'force_set'\n**Bug description**\r\nWhen I try the pretrained model of Self-feeding Chatbot, by `python projects/self_feeding/interactive.py --model-file zoo:self_feeding/hh131k_hb60k_fb60k_st1k/model --no-cuda`, error occurs: AttributeError: 'dict' object has no attribute 'force_set'\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\nEnter Your Message: hello\r\nTraceback (most recent call last):\r\n File \"projects/self_feeding/interactive.py\", line 87, in <module>\r\n interactive(parser.parse_args(print_args=False), print_parser=parser)\r\n File \"projects/self_feeding/interactive.py\", line 78, in interactive\r\n world.parley()\r\n File \"/home/han/Github/ParlAI/parlai/core/worlds.py\", line 273, in parley\r\n agents[1].observe(validate(acts[0]))\r\n File \"/home/han/Github/ParlAI/projects/self_feeding/self_feeding_agent.py\", line 370, in observe\r\n observation.force_set(\r\nAttributeError: 'dict' object has no attribute 'force_set'\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Agent does gets the local keyboard input in the act() function.\n Example: python examples/eval_model.py -m local_human -t babi:Task1k:1 -dt valid\n\"\"\"\n\nfrom parlai.core.agents import Agent\nfrom parlai.core.utils import display_messages, load_cands\n\n\nclass LocalHumanAgent(Agent):\n def add_cmdline_args(argparser):\n \"\"\"Add command-line arguments specifically for this agent.\"\"\"\n agent = argparser.add_argument_group('Local Human Arguments')\n agent.add_argument(\n '-fixedCands',\n '--local-human-candidates-file',\n default=None,\n type=str,\n help='File of label_candidates to send to other agent',\n )\n agent.add_argument(\n '--single_turn',\n type='bool',\n default=False,\n help='If on, assumes single turn episodes.',\n )\n\n def __init__(self, opt, shared=None):\n super().__init__(opt)\n self.id = 'localHuman'\n self.episodeDone = False\n self.fixedCands_txt = load_cands(self.opt.get('local_human_candidates_file'))\n print(\"Enter [DONE] if you want to end the episode.\\n\")\n\n def observe(self, msg):\n print(\n display_messages(\n [msg],\n ignore_fields=self.opt.get('display_ignore_fields', ''),\n prettify=self.opt.get('display_prettify', False),\n )\n )\n\n def act(self):\n reply = {}\n reply['id'] = self.getID()\n reply_text = input(\"Enter Your Message: \")\n reply_text = reply_text.replace('\\\\n', '\\n')\n if self.opt.get('single_turn', False):\n reply_text += '[DONE]'\n reply['episode_done'] = False\n reply['label_candidates'] = self.fixedCands_txt\n if '[DONE]' in reply_text:\n reply['episode_done'] = True\n self.episodeDone = True\n reply_text = reply_text.replace('[DONE]', '')\n reply['text'] = reply_text\n return reply\n\n def episode_done(self):\n return self.episodeDone\n", "path": "parlai/agents/local_human/local_human.py"}]}
1,460
257
gh_patches_debug_628
rasdani/github-patches
git_diff
litestar-org__litestar-1633
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 </issue> <code> [start of tools/build_docs.py] 1 from __future__ import annotations 2 3 import argparse 4 import importlib.metadata 5 import json 6 import os 7 import shutil 8 import subprocess 9 from contextlib import contextmanager 10 from pathlib import Path 11 from typing import TypedDict 12 13 REDIRECT_TEMPLATE = """ 14 <!DOCTYPE HTML> 15 <html lang="en-US"> 16 <head> 17 <title>Page Redirection</title> 18 <meta charset="UTF-8"> 19 <meta http-equiv="refresh" content="0; url={target}"> 20 <script type="text/javascript">window.location.href = "{target}"</script> 21 </head> 22 <body> 23 You are being redirected. If this does not work, click <a href='{target}'>this link</a> 24 </body> 25 </html> 26 """ 27 28 parser = argparse.ArgumentParser() 29 parser.add_argument("--version", required=False) 30 parser.add_argument("--ignore-missing-examples-output", action="store_true", default=False) 31 parser.add_argument("output") 32 33 34 class VersionSpec(TypedDict): 35 versions: list[str] 36 latest: str 37 38 39 @contextmanager 40 def checkout(branch: str) -> None: 41 subprocess.run(["git", "checkout", branch], check=True) # noqa: S603 S607 42 yield 43 subprocess.run(["git", "checkout", "-"], check=True) # noqa: S603 S607 44 45 46 def load_version_spec() -> VersionSpec: 47 versions_file = Path("docs/_static/versions.json") 48 if versions_file.exists(): 49 return json.loads(versions_file.read_text()) 50 return {"versions": [], "latest": ""} 51 52 53 def build(output_dir: str, version: str | None, ignore_missing_output: bool) -> None: 54 if version is None: 55 version = importlib.metadata.version("litestar").rsplit(".")[0] 56 else: 57 os.environ["_LITESTAR_DOCS_BUILD_VERSION"] = version 58 59 if ignore_missing_output: 60 os.environ["_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT"] = "1" 61 62 subprocess.run(["make", "docs"], check=True) # noqa: S603 S607 63 64 output_dir = Path(output_dir) 65 output_dir.mkdir() 66 output_dir.joinpath(".nojekyll").touch(exist_ok=True) 67 68 version_spec = load_version_spec() 69 is_latest = version == version_spec["latest"] 70 71 docs_src_path = Path("docs/_build/html") 72 73 output_dir.joinpath("index.html").write_text(REDIRECT_TEMPLATE.format(target="latest")) 74 75 if is_latest: 76 shutil.copytree(docs_src_path, output_dir / "latest", dirs_exist_ok=True) 77 shutil.copytree(docs_src_path, output_dir / version, dirs_exist_ok=True) 78 79 # copy existing versions into our output dir to preserve them when cleaning the branch 80 with checkout("gh-pages"): 81 for other_version in [*version_spec["versions"], "latest"]: 82 other_version_path = Path(other_version) 83 other_version_target_path = output_dir / other_version 84 if other_version_path.exists() and not other_version_target_path.exists(): 85 shutil.copytree(other_version_path, other_version_target_path) 86 87 88 def main() -> None: 89 args = parser.parse_args() 90 build( 91 output_dir=args.output, 92 version=args.version, 93 ignore_missing_output=args.ignore_missing_output, 94 ) 95 96 97 if __name__ == "__main__": 98 main() 99 [end of tools/build_docs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/build_docs.py b/tools/build_docs.py --- a/tools/build_docs.py +++ b/tools/build_docs.py @@ -90,7 +90,7 @@ build( output_dir=args.output, version=args.version, - ignore_missing_output=args.ignore_missing_output, + ignore_missing_output=args.ignore_missing_examples_output, )
{"golden_diff": "diff --git a/tools/build_docs.py b/tools/build_docs.py\n--- a/tools/build_docs.py\n+++ b/tools/build_docs.py\n@@ -90,7 +90,7 @@\n build(\n output_dir=args.output,\n version=args.version,\n- ignore_missing_output=args.ignore_missing_output,\n+ ignore_missing_output=args.ignore_missing_examples_output,\n )\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport importlib.metadata\nimport json\nimport os\nimport shutil\nimport subprocess\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import TypedDict\n\nREDIRECT_TEMPLATE = \"\"\"\n<!DOCTYPE HTML>\n<html lang=\"en-US\">\n <head>\n <title>Page Redirection</title>\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"refresh\" content=\"0; url={target}\">\n <script type=\"text/javascript\">window.location.href = \"{target}\"</script>\n </head>\n <body>\n You are being redirected. If this does not work, click <a href='{target}'>this link</a>\n </body>\n</html>\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--version\", required=False)\nparser.add_argument(\"--ignore-missing-examples-output\", action=\"store_true\", default=False)\nparser.add_argument(\"output\")\n\n\nclass VersionSpec(TypedDict):\n versions: list[str]\n latest: str\n\n\n@contextmanager\ndef checkout(branch: str) -> None:\n subprocess.run([\"git\", \"checkout\", branch], check=True) # noqa: S603 S607\n yield\n subprocess.run([\"git\", \"checkout\", \"-\"], check=True) # noqa: S603 S607\n\n\ndef load_version_spec() -> VersionSpec:\n versions_file = Path(\"docs/_static/versions.json\")\n if versions_file.exists():\n return json.loads(versions_file.read_text())\n return {\"versions\": [], \"latest\": \"\"}\n\n\ndef build(output_dir: str, version: str | None, ignore_missing_output: bool) -> None:\n if version is None:\n version = importlib.metadata.version(\"litestar\").rsplit(\".\")[0]\n else:\n os.environ[\"_LITESTAR_DOCS_BUILD_VERSION\"] = version\n\n if ignore_missing_output:\n os.environ[\"_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT\"] = \"1\"\n\n subprocess.run([\"make\", \"docs\"], check=True) # noqa: S603 S607\n\n output_dir = Path(output_dir)\n output_dir.mkdir()\n output_dir.joinpath(\".nojekyll\").touch(exist_ok=True)\n\n version_spec = load_version_spec()\n is_latest = version == version_spec[\"latest\"]\n\n docs_src_path = Path(\"docs/_build/html\")\n\n output_dir.joinpath(\"index.html\").write_text(REDIRECT_TEMPLATE.format(target=\"latest\"))\n\n if is_latest:\n shutil.copytree(docs_src_path, output_dir / \"latest\", dirs_exist_ok=True)\n shutil.copytree(docs_src_path, output_dir / version, dirs_exist_ok=True)\n\n # copy existing versions into our output dir to preserve them when cleaning the branch\n with checkout(\"gh-pages\"):\n for other_version in [*version_spec[\"versions\"], \"latest\"]:\n other_version_path = Path(other_version)\n other_version_target_path = output_dir / other_version\n if other_version_path.exists() and not other_version_target_path.exists():\n shutil.copytree(other_version_path, other_version_target_path)\n\n\ndef main() -> None:\n args = parser.parse_args()\n build(\n output_dir=args.output,\n version=args.version,\n ignore_missing_output=args.ignore_missing_output,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/build_docs.py"}]}
1,620
78
gh_patches_debug_32292
rasdani/github-patches
git_diff
CiviWiki__OpenCiviWiki-1088
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Migrate threads urls to path in `threads` app, we need to change `url()` function with `path()` function as discussed in #1066 https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41 Conversion to [path](https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41) is simple. For example, ```python url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread"), ``` should become ```python path("thread_data/(<int:thread_id>/", get_thread, name="get thread"), ``` We need to be changed all usages of `url()` function in `threads` app. Migrate threads urls to path in `threads` app, we need to change `url()` function with `path()` function as discussed in #1066 https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41 Conversion to [path](https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41) is simple. For example, ```python url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread"), ``` should become ```python path("thread_data/(<int:thread_id>/", get_thread, name="get thread"), ``` We need to be changed all usages of `url()` function in `threads` app. </issue> <code> [start of project/threads/urls.py] 1 from django.conf.urls import include, url 2 from rest_framework.routers import DefaultRouter 3 4 from .api import (create_civi, delete_civi, edit_civi, edit_thread, get_civi, 5 get_thread, rate_civi, upload_civi_image, new_thread, get_civis, 6 get_responses, upload_thread_image) 7 8 from .views import ( 9 ThreadViewSet, CategoryViewSet, 10 CiviViewSet 11 ) 12 from accounts.api import ProfileViewSet 13 14 router = DefaultRouter(trailing_slash=False) 15 router.register(r"threads", ThreadViewSet) 16 router.register(r"categories", CategoryViewSet) 17 router.register(r"civis", CiviViewSet) 18 router.register(r"accounts", ProfileViewSet) 19 20 urlpatterns = [ 21 url(r"^v1/", include(router.urls)), 22 ] 23 24 urlpatterns += [ 25 url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread"), 26 url(r"^civi_data/(?P<civi_id>\w+)$", get_civi, name="get civi"), 27 url(r"^threads/(?P<thread_id>\w+)/civis$", get_civis, name="get civis"), 28 url( 29 r"^response_data/(?P<thread_id>\w+)/(?P<civi_id>\w+)/$", 30 get_responses, 31 name="get responses", 32 ), 33 url(r"^new_thread/$", new_thread, name="new thread"), 34 url(r"^edit_thread/$", edit_thread, name="edit thread"), 35 url(r"^new_civi/$", create_civi, name="new civi"), 36 url(r"^rate_civi/$", rate_civi, name="rate civi"), 37 url(r"^edit_civi/$", edit_civi, name="edit civi"), 38 url(r"^delete_civi/$", delete_civi, name="delete civi"), 39 url(r"^upload_images/$", upload_civi_image, name="upload images"), 40 url(r"^upload_image/$", upload_thread_image, name="upload image"), 41 ] 42 [end of project/threads/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/project/threads/urls.py b/project/threads/urls.py --- a/project/threads/urls.py +++ b/project/threads/urls.py @@ -1,4 +1,5 @@ -from django.conf.urls import include, url +from django.conf.urls import include +from django.urls import path from rest_framework.routers import DefaultRouter from .api import (create_civi, delete_civi, edit_civi, edit_thread, get_civi, @@ -18,24 +19,24 @@ router.register(r"accounts", ProfileViewSet) urlpatterns = [ - url(r"^v1/", include(router.urls)), + path("v1/", include(router.urls)), ] urlpatterns += [ - url(r"^thread_data/(?P<thread_id>\w+)/$", get_thread, name="get thread"), - url(r"^civi_data/(?P<civi_id>\w+)$", get_civi, name="get civi"), - url(r"^threads/(?P<thread_id>\w+)/civis$", get_civis, name="get civis"), - url( - r"^response_data/(?P<thread_id>\w+)/(?P<civi_id>\w+)/$", + path("thread_data/<int:thread_id>/", get_thread, name="get thread"), + path("civi_data/<int:civi_id>/", get_civi, name="get civi"), + path("threads/<int:thread_id>/civis", get_civis, name="get civis"), + path( + "response_data/<int:thread_id>/<int:civi_id>/", get_responses, name="get responses", ), - url(r"^new_thread/$", new_thread, name="new thread"), - url(r"^edit_thread/$", edit_thread, name="edit thread"), - url(r"^new_civi/$", create_civi, name="new civi"), - url(r"^rate_civi/$", rate_civi, name="rate civi"), - url(r"^edit_civi/$", edit_civi, name="edit civi"), - url(r"^delete_civi/$", delete_civi, name="delete civi"), - url(r"^upload_images/$", upload_civi_image, name="upload images"), - url(r"^upload_image/$", upload_thread_image, name="upload image"), + path("new_thread/", new_thread, name="new thread"), + path("edit_thread/", edit_thread, name="edit thread"), + path("new_civi/", create_civi, name="new civi"), + path("rate_civi/", rate_civi, name="rate civi"), + path("edit_civi/", edit_civi, name="edit civi"), + path("delete_civi/", delete_civi, name="delete civi"), + path("upload_images/", upload_civi_image, name="upload images"), + path("upload_image/", upload_thread_image, name="upload image"), ]
{"golden_diff": "diff --git a/project/threads/urls.py b/project/threads/urls.py\n--- a/project/threads/urls.py\n+++ b/project/threads/urls.py\n@@ -1,4 +1,5 @@\n-from django.conf.urls import include, url\r\n+from django.conf.urls import include\r\n+from django.urls import path\r\n from rest_framework.routers import DefaultRouter\r\n \r\n from .api import (create_civi, delete_civi, edit_civi, edit_thread, get_civi,\r\n@@ -18,24 +19,24 @@\n router.register(r\"accounts\", ProfileViewSet)\r\n \r\n urlpatterns = [\r\n- url(r\"^v1/\", include(router.urls)),\r\n+ path(\"v1/\", include(router.urls)),\r\n ]\r\n \r\n urlpatterns += [\r\n- url(r\"^thread_data/(?P<thread_id>\\w+)/$\", get_thread, name=\"get thread\"),\r\n- url(r\"^civi_data/(?P<civi_id>\\w+)$\", get_civi, name=\"get civi\"),\r\n- url(r\"^threads/(?P<thread_id>\\w+)/civis$\", get_civis, name=\"get civis\"),\r\n- url(\r\n- r\"^response_data/(?P<thread_id>\\w+)/(?P<civi_id>\\w+)/$\",\r\n+ path(\"thread_data/<int:thread_id>/\", get_thread, name=\"get thread\"),\r\n+ path(\"civi_data/<int:civi_id>/\", get_civi, name=\"get civi\"),\r\n+ path(\"threads/<int:thread_id>/civis\", get_civis, name=\"get civis\"),\r\n+ path(\r\n+ \"response_data/<int:thread_id>/<int:civi_id>/\",\r\n get_responses,\r\n name=\"get responses\",\r\n ),\r\n- url(r\"^new_thread/$\", new_thread, name=\"new thread\"),\r\n- url(r\"^edit_thread/$\", edit_thread, name=\"edit thread\"),\r\n- url(r\"^new_civi/$\", create_civi, name=\"new civi\"),\r\n- url(r\"^rate_civi/$\", rate_civi, name=\"rate civi\"),\r\n- url(r\"^edit_civi/$\", edit_civi, name=\"edit civi\"),\r\n- url(r\"^delete_civi/$\", delete_civi, name=\"delete civi\"),\r\n- url(r\"^upload_images/$\", upload_civi_image, name=\"upload images\"),\r\n- url(r\"^upload_image/$\", upload_thread_image, name=\"upload image\"),\r\n+ path(\"new_thread/\", new_thread, name=\"new thread\"),\r\n+ path(\"edit_thread/\", edit_thread, name=\"edit thread\"),\r\n+ path(\"new_civi/\", create_civi, name=\"new civi\"),\r\n+ path(\"rate_civi/\", rate_civi, name=\"rate civi\"),\r\n+ path(\"edit_civi/\", edit_civi, name=\"edit civi\"),\r\n+ path(\"delete_civi/\", delete_civi, name=\"delete civi\"),\r\n+ path(\"upload_images/\", upload_civi_image, name=\"upload images\"),\r\n+ path(\"upload_image/\", upload_thread_image, name=\"upload image\"),\r\n ]\n", "issue": "Migrate threads urls to path\nin `threads` app, we need to change `url()` function with `path()` function as discussed in #1066\r\n\r\nhttps://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41\r\n\r\nConversion to [path](https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41) is simple.\r\n\r\nFor example,\r\n\r\n```python\r\nurl(r\"^thread_data/(?P<thread_id>\\w+)/$\", get_thread, name=\"get thread\"),\r\n```\r\n\r\nshould become\r\n\r\n```python\r\npath(\"thread_data/(<int:thread_id>/\", get_thread, name=\"get thread\"),\r\n```\r\n\r\nWe need to be changed all usages of `url()` function in `threads` app.\nMigrate threads urls to path\nin `threads` app, we need to change `url()` function with `path()` function as discussed in #1066\r\n\r\nhttps://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41\r\n\r\nConversion to [path](https://github.com/CiviWiki/OpenCiviWiki/blob/d7f24fad7c0a2565da3bf2cd54e89a847d7479dd/project/threads/urls.py#L19-L41) is simple.\r\n\r\nFor example,\r\n\r\n```python\r\nurl(r\"^thread_data/(?P<thread_id>\\w+)/$\", get_thread, name=\"get thread\"),\r\n```\r\n\r\nshould become\r\n\r\n```python\r\npath(\"thread_data/(<int:thread_id>/\", get_thread, name=\"get thread\"),\r\n```\r\n\r\nWe need to be changed all usages of `url()` function in `threads` app.\n", "before_files": [{"content": "from django.conf.urls import include, url\r\nfrom rest_framework.routers import DefaultRouter\r\n\r\nfrom .api import (create_civi, delete_civi, edit_civi, edit_thread, get_civi,\r\n get_thread, rate_civi, upload_civi_image, new_thread, get_civis,\r\n get_responses, upload_thread_image)\r\n\r\nfrom .views import (\r\n ThreadViewSet, CategoryViewSet,\r\n CiviViewSet\r\n)\r\nfrom accounts.api import ProfileViewSet\r\n\r\nrouter = DefaultRouter(trailing_slash=False)\r\nrouter.register(r\"threads\", ThreadViewSet)\r\nrouter.register(r\"categories\", CategoryViewSet)\r\nrouter.register(r\"civis\", CiviViewSet)\r\nrouter.register(r\"accounts\", ProfileViewSet)\r\n\r\nurlpatterns = [\r\n url(r\"^v1/\", include(router.urls)),\r\n]\r\n\r\nurlpatterns += [\r\n url(r\"^thread_data/(?P<thread_id>\\w+)/$\", get_thread, name=\"get thread\"),\r\n url(r\"^civi_data/(?P<civi_id>\\w+)$\", get_civi, name=\"get civi\"),\r\n url(r\"^threads/(?P<thread_id>\\w+)/civis$\", get_civis, name=\"get civis\"),\r\n url(\r\n r\"^response_data/(?P<thread_id>\\w+)/(?P<civi_id>\\w+)/$\",\r\n get_responses,\r\n name=\"get responses\",\r\n ),\r\n url(r\"^new_thread/$\", new_thread, name=\"new thread\"),\r\n url(r\"^edit_thread/$\", edit_thread, name=\"edit thread\"),\r\n url(r\"^new_civi/$\", create_civi, name=\"new civi\"),\r\n url(r\"^rate_civi/$\", rate_civi, name=\"rate civi\"),\r\n url(r\"^edit_civi/$\", edit_civi, name=\"edit civi\"),\r\n url(r\"^delete_civi/$\", delete_civi, name=\"delete civi\"),\r\n url(r\"^upload_images/$\", upload_civi_image, name=\"upload images\"),\r\n url(r\"^upload_image/$\", upload_thread_image, name=\"upload image\"),\r\n]\r\n", "path": "project/threads/urls.py"}]}
1,523
661
gh_patches_debug_26658
rasdani/github-patches
git_diff
pulp__pulpcore-2779
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Append of chunked upload processes raw data **Version** Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version. **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: **Expected behavior** A clear and concise description of what you expected to happen. **Additional context** Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla. </issue> <code> [start of pulpcore/app/models/upload.py] 1 import hashlib 2 import os 3 4 from django.core.files.base import ContentFile 5 from django.db import models 6 from django.db.models.signals import post_delete 7 from django.dispatch import receiver 8 from rest_framework import serializers 9 10 from pulpcore.app.models import BaseModel, fields, storage 11 12 13 class Upload(BaseModel): 14 """ 15 A chunked upload. Stores chunks until used to create an artifact, etc. 16 17 Fields: 18 19 size (models.BigIntegerField): The size of the file in bytes. 20 """ 21 22 size = models.BigIntegerField() 23 24 def append(self, chunk, offset, sha256=None): 25 """ 26 Append a chunk to an upload. 27 28 Args: 29 chunk (File): Binary file to append to the upload file. 30 offset (int): First byte position to write chunk to. 31 """ 32 chunk_read = chunk.read() 33 current_sha256 = hashlib.sha256(chunk_read).hexdigest() 34 if sha256 and sha256 != current_sha256: 35 raise serializers.ValidationError("Checksum does not match chunk upload.") 36 37 upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk)) 38 filename = os.path.basename(upload_chunk.storage_path("")) 39 upload_chunk.file.save(filename, ContentFile(chunk_read)) 40 41 42 class UploadChunk(BaseModel): 43 """ 44 A chunk for an uploaded file. 45 46 Fields: 47 48 file (fields.FileField): A file where the uploaded chunk is stored. 49 upload (models.ForeignKey): Upload this chunk belongs to. 50 offset (models.BigIntegerField): Start of the chunk in bytes. 51 size (models.BigIntegerField): Size of the chunk in bytes. 52 """ 53 54 def storage_path(self, name): 55 """ 56 Callable used by FileField to determine where the uploaded file should be stored. 57 58 Args: 59 name (str): Original name of uploaded file. It is ignored by this method because the 60 pulp_id is used to determine a file path instead. 61 """ 62 return storage.get_upload_chunk_file_path(self.pulp_id) 63 64 file = fields.FileField(null=False, upload_to=storage_path, max_length=255) 65 upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name="chunks") 66 offset = models.BigIntegerField() 67 size = models.BigIntegerField() 68 69 70 @receiver(post_delete, sender=UploadChunk) 71 def upload_chunk_delete(instance, **kwargs): 72 instance.file.delete(save=False) 73 [end of pulpcore/app/models/upload.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py --- a/pulpcore/app/models/upload.py +++ b/pulpcore/app/models/upload.py @@ -1,6 +1,8 @@ import hashlib import os +from gettext import gettext as _ + from django.core.files.base import ContentFile from django.db import models from django.db.models.signals import post_delete @@ -26,17 +28,18 @@ Append a chunk to an upload. Args: - chunk (File): Binary file to append to the upload file. + chunk (File): Binary data to append to the upload file. offset (int): First byte position to write chunk to. """ - chunk_read = chunk.read() - current_sha256 = hashlib.sha256(chunk_read).hexdigest() - if sha256 and sha256 != current_sha256: - raise serializers.ValidationError("Checksum does not match chunk upload.") + chunk = chunk.read() + if sha256: + current_sha256 = hashlib.sha256(chunk).hexdigest() + if sha256 != current_sha256: + raise serializers.ValidationError(_("Checksum does not match chunk upload.")) upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk)) filename = os.path.basename(upload_chunk.storage_path("")) - upload_chunk.file.save(filename, ContentFile(chunk_read)) + upload_chunk.file.save(filename, ContentFile(chunk)) class UploadChunk(BaseModel):
{"golden_diff": "diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py\n--- a/pulpcore/app/models/upload.py\n+++ b/pulpcore/app/models/upload.py\n@@ -1,6 +1,8 @@\n import hashlib\n import os\n \n+from gettext import gettext as _\n+\n from django.core.files.base import ContentFile\n from django.db import models\n from django.db.models.signals import post_delete\n@@ -26,17 +28,18 @@\n Append a chunk to an upload.\n \n Args:\n- chunk (File): Binary file to append to the upload file.\n+ chunk (File): Binary data to append to the upload file.\n offset (int): First byte position to write chunk to.\n \"\"\"\n- chunk_read = chunk.read()\n- current_sha256 = hashlib.sha256(chunk_read).hexdigest()\n- if sha256 and sha256 != current_sha256:\n- raise serializers.ValidationError(\"Checksum does not match chunk upload.\")\n+ chunk = chunk.read()\n+ if sha256:\n+ current_sha256 = hashlib.sha256(chunk).hexdigest()\n+ if sha256 != current_sha256:\n+ raise serializers.ValidationError(_(\"Checksum does not match chunk upload.\"))\n \n upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))\n filename = os.path.basename(upload_chunk.storage_path(\"\"))\n- upload_chunk.file.save(filename, ContentFile(chunk_read))\n+ upload_chunk.file.save(filename, ContentFile(chunk))\n \n \n class UploadChunk(BaseModel):\n", "issue": "Append of chunked upload processes raw data\n**Version**\r\nPlease provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "import hashlib\nimport os\n\nfrom django.core.files.base import ContentFile\nfrom django.db import models\nfrom django.db.models.signals import post_delete\nfrom django.dispatch import receiver\nfrom rest_framework import serializers\n\nfrom pulpcore.app.models import BaseModel, fields, storage\n\n\nclass Upload(BaseModel):\n \"\"\"\n A chunked upload. Stores chunks until used to create an artifact, etc.\n\n Fields:\n\n size (models.BigIntegerField): The size of the file in bytes.\n \"\"\"\n\n size = models.BigIntegerField()\n\n def append(self, chunk, offset, sha256=None):\n \"\"\"\n Append a chunk to an upload.\n\n Args:\n chunk (File): Binary file to append to the upload file.\n offset (int): First byte position to write chunk to.\n \"\"\"\n chunk_read = chunk.read()\n current_sha256 = hashlib.sha256(chunk_read).hexdigest()\n if sha256 and sha256 != current_sha256:\n raise serializers.ValidationError(\"Checksum does not match chunk upload.\")\n\n upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))\n filename = os.path.basename(upload_chunk.storage_path(\"\"))\n upload_chunk.file.save(filename, ContentFile(chunk_read))\n\n\nclass UploadChunk(BaseModel):\n \"\"\"\n A chunk for an uploaded file.\n\n Fields:\n\n file (fields.FileField): A file where the uploaded chunk is stored.\n upload (models.ForeignKey): Upload this chunk belongs to.\n offset (models.BigIntegerField): Start of the chunk in bytes.\n size (models.BigIntegerField): Size of the chunk in bytes.\n \"\"\"\n\n def storage_path(self, name):\n \"\"\"\n Callable used by FileField to determine where the uploaded file should be stored.\n\n Args:\n name (str): Original name of uploaded file. It is ignored by this method because the\n pulp_id is used to determine a file path instead.\n \"\"\"\n return storage.get_upload_chunk_file_path(self.pulp_id)\n\n file = fields.FileField(null=False, upload_to=storage_path, max_length=255)\n upload = models.ForeignKey(Upload, on_delete=models.CASCADE, related_name=\"chunks\")\n offset = models.BigIntegerField()\n size = models.BigIntegerField()\n\n\n@receiver(post_delete, sender=UploadChunk)\ndef upload_chunk_delete(instance, **kwargs):\n instance.file.delete(save=False)\n", "path": "pulpcore/app/models/upload.py"}]}
1,306
339
gh_patches_debug_16493
rasdani/github-patches
git_diff
svthalia__concrexit-2808
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Invited from queue email missing some stuff ### Describe the bug ![image](https://user-images.githubusercontent.com/41264528/217180789-a1478ad4-56b2-43af-b6f0-52ee272e2bc6.jpeg) ### How to reproduce Steps to reproduce the behaviour: 1. Be in queue 2. Get invited (by people deregistering) </issue> <code> [start of website/events/emails.py] 1 """The emails defined by the events package.""" 2 from django.conf import settings 3 from django.core.mail import EmailMessage 4 from django.template.loader import get_template 5 from django.utils.translation import gettext_lazy as _ 6 7 8 def notify_first_waiting(event): 9 """Send an email to the first person on the waiting list when someone cancels their registration. 10 11 :param event: the event 12 """ 13 if ( 14 event.max_participants is not None 15 and event.eventregistration_set.filter(date_cancelled=None).count() 16 > event.max_participants 17 ): 18 # Prepare email to send to the first person on the waiting list 19 first_waiting = event.eventregistration_set.filter( 20 date_cancelled=None 21 ).order_by("date")[event.max_participants] 22 23 text_template = get_template("events/member_email.txt") 24 25 subject = _("[THALIA] Notification about your registration for '{}'").format( 26 event.title 27 ) 28 text_message = text_template.render( 29 { 30 "event": event, 31 "registration": first_waiting, 32 "name": first_waiting.name or first_waiting.member.first_name, 33 "base_url": settings.BASE_URL, 34 } 35 ) 36 37 EmailMessage(subject, text_message, to=[first_waiting.email]).send() 38 39 40 def notify_organiser(event, registration): 41 """Send an email to the organiser of the event if someone cancels their registration. 42 43 :param event: the event 44 :param registration: the registration that was cancelled 45 """ 46 if not event.organisers.exists(): 47 return 48 49 text_template = get_template("events/organiser_email.txt") 50 subject = f"Registration for {event.title} cancelled by member" 51 text_message = text_template.render({"event": event, "registration": registration}) 52 53 EmailMessage( 54 subject, 55 text_message, 56 to=[ 57 organiser.contact_mailinglist.name + "@" + settings.SITE_DOMAIN 58 for organiser in event.organisers.all() 59 ], 60 ).send() 61 62 63 def notify_waiting(event, registration): 64 text_template = get_template("events/more_places_email.txt") 65 subject = _("[THALIA] Notification about your registration for '{}'").format( 66 event.title 67 ) 68 text_message = text_template.render( 69 { 70 "event": event, 71 "registration": registration, 72 "name": registration.name or registration.member.first_name, 73 "base_url": settings.BASE_URL, 74 } 75 ) 76 EmailMessage(subject, text_message, to=[registration.email]).send() 77 [end of website/events/emails.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/events/emails.py b/website/events/emails.py --- a/website/events/emails.py +++ b/website/events/emails.py @@ -25,12 +25,19 @@ subject = _("[THALIA] Notification about your registration for '{}'").format( event.title ) + + organiser_emails = [ + organiser.contact_address + for organiser in event.organisers.all() + if organiser.contact_address is not None + ] text_message = text_template.render( { "event": event, "registration": first_waiting, "name": first_waiting.name or first_waiting.member.first_name, "base_url": settings.BASE_URL, + "organisers": organiser_emails, } )
{"golden_diff": "diff --git a/website/events/emails.py b/website/events/emails.py\n--- a/website/events/emails.py\n+++ b/website/events/emails.py\n@@ -25,12 +25,19 @@\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n+\n+ organiser_emails = [\n+ organiser.contact_address\n+ for organiser in event.organisers.all()\n+ if organiser.contact_address is not None\n+ ]\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": first_waiting,\n \"name\": first_waiting.name or first_waiting.member.first_name,\n \"base_url\": settings.BASE_URL,\n+ \"organisers\": organiser_emails,\n }\n )\n", "issue": "Invited from queue email missing some stuff\n### Describe the bug\n![image](https://user-images.githubusercontent.com/41264528/217180789-a1478ad4-56b2-43af-b6f0-52ee272e2bc6.jpeg)\n\n\n### How to reproduce\nSteps to reproduce the behaviour:\n1. Be in queue\n2. Get invited (by people deregistering)\n", "before_files": [{"content": "\"\"\"The emails defined by the events package.\"\"\"\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nfrom django.utils.translation import gettext_lazy as _\n\n\ndef notify_first_waiting(event):\n \"\"\"Send an email to the first person on the waiting list when someone cancels their registration.\n\n :param event: the event\n \"\"\"\n if (\n event.max_participants is not None\n and event.eventregistration_set.filter(date_cancelled=None).count()\n > event.max_participants\n ):\n # Prepare email to send to the first person on the waiting list\n first_waiting = event.eventregistration_set.filter(\n date_cancelled=None\n ).order_by(\"date\")[event.max_participants]\n\n text_template = get_template(\"events/member_email.txt\")\n\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": first_waiting,\n \"name\": first_waiting.name or first_waiting.member.first_name,\n \"base_url\": settings.BASE_URL,\n }\n )\n\n EmailMessage(subject, text_message, to=[first_waiting.email]).send()\n\n\ndef notify_organiser(event, registration):\n \"\"\"Send an email to the organiser of the event if someone cancels their registration.\n\n :param event: the event\n :param registration: the registration that was cancelled\n \"\"\"\n if not event.organisers.exists():\n return\n\n text_template = get_template(\"events/organiser_email.txt\")\n subject = f\"Registration for {event.title} cancelled by member\"\n text_message = text_template.render({\"event\": event, \"registration\": registration})\n\n EmailMessage(\n subject,\n text_message,\n to=[\n organiser.contact_mailinglist.name + \"@\" + settings.SITE_DOMAIN\n for organiser in event.organisers.all()\n ],\n ).send()\n\n\ndef notify_waiting(event, registration):\n text_template = get_template(\"events/more_places_email.txt\")\n subject = _(\"[THALIA] Notification about your registration for '{}'\").format(\n event.title\n )\n text_message = text_template.render(\n {\n \"event\": event,\n \"registration\": registration,\n \"name\": registration.name or registration.member.first_name,\n \"base_url\": settings.BASE_URL,\n }\n )\n EmailMessage(subject, text_message, to=[registration.email]).send()\n", "path": "website/events/emails.py"}]}
1,312
175
gh_patches_debug_7108
rasdani/github-patches
git_diff
vas3k__vas3k.club-858
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: возможность замьютить самого себя ## Чеклист - [x] Я поискал поиском по трекеру похожие проблемы, в том числе в закрытых Issues - [x] Баг стабильно воспроизводится и я знаю как это сделать ## Описание бага Пользователь технически может замьюить самого себя, после чего не сможет увидеть свои посты и комментарии. Отмьютить себя пользователь тоже может самостоятельно (это работает в обе стороны). ## Ожидаемый результат - При вызове мьюта самого себя система показывает шаблон с заглушкой: нельзя мьютить себя. - Невозможность мьюта и размьюта самого себя. ## Шаги к воспроизведению 1. **Mute**: перейти на URL `vas3k.club/user/%USERNAME%/mute/`, где `%USERNAME%` — псевдоним пользователя (slug) 2. Следовать мастеру мьюта. 3. **Unmute**: перейти на URL `vas3k.club/user/%USERNAME%/mute/`, где `%USERNAME%` — псевдоним пользователя (slug) 4. Следовать мастеру мьюта. Скриншот со страницы шаблона мьюта (продакшн): ![image](https://user-images.githubusercontent.com/94802477/148997272-685359bd-4d61-4f69-9dee-2b6beb6045d3.png) </issue> <code> [start of users/views/muted.py] 1 from django.conf import settings 2 from django.http import HttpResponseForbidden 3 from django.shortcuts import get_object_or_404, render 4 5 from auth.helpers import auth_required 6 from club.exceptions import AccessDenied 7 from notifications.telegram.users import notify_admin_user_on_mute 8 from users.models.mute import Muted 9 from users.models.user import User 10 11 12 @auth_required 13 def toggle_mute(request, user_slug): 14 user_to = get_object_or_404(User, slug=user_slug) 15 if user_to.is_curator or user_to.is_moderator: 16 raise AccessDenied(title="У этого юзера иммунитет от мьюта") 17 18 total_user_muted_count = Muted.objects.filter(user_from=request.me).count() 19 20 # show form on GET 21 if request.method != "POST": 22 is_muted = Muted.is_muted( 23 user_from=request.me, 24 user_to=user_to, 25 ) 26 if is_muted: 27 return render(request, "users/mute/unmute.html", { 28 "user": user_to, 29 }) 30 else: 31 return render(request, "users/mute/mute.html", { 32 "user": user_to, 33 "mutes_left": settings.MAX_MUTE_COUNT - total_user_muted_count, 34 }) 35 36 # else — process POST 37 if total_user_muted_count > settings.MAX_MUTE_COUNT: 38 raise AccessDenied( 39 title="Вы замьютили слишком много людей", 40 message="Рекомендуем притормозить и поговорить с кем-нибудь..." 41 ) 42 43 comment = request.POST.get("comment") or "" 44 mute, is_created = Muted.mute( 45 user_from=request.me, 46 user_to=user_to, 47 comment=comment, 48 ) 49 50 if is_created: 51 # notify admins 52 notify_admin_user_on_mute( 53 user_from=request.me, 54 user_to=user_to, 55 comment=comment, 56 ) 57 58 return render(request, "users/messages/muted.html", { 59 "user": user_to, 60 }) 61 else: 62 # unmute this user 63 Muted.unmute( 64 user_from=request.me, 65 user_to=user_to, 66 ) 67 68 return render(request, "users/messages/unmuted.html", { 69 "user": user_to, 70 }) 71 72 73 @auth_required 74 def muted(request, user_slug): 75 if request.me.slug != user_slug: 76 return HttpResponseForbidden() 77 78 user = get_object_or_404(User, slug=user_slug) 79 muted_users = Muted.muted_by_user(user) 80 81 return render(request, "users/mute/index.html", { 82 "user": user, 83 "muted": muted_users, 84 }) 85 [end of users/views/muted.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/users/views/muted.py b/users/views/muted.py --- a/users/views/muted.py +++ b/users/views/muted.py @@ -15,6 +15,10 @@ if user_to.is_curator or user_to.is_moderator: raise AccessDenied(title="У этого юзера иммунитет от мьюта") + if user_to == request.me: + raise AccessDenied(title='Хорошая попытка, но мьютить себя нельзя. Кожаный мешок, ты прекрасен!', + message='') + total_user_muted_count = Muted.objects.filter(user_from=request.me).count() # show form on GET
{"golden_diff": "diff --git a/users/views/muted.py b/users/views/muted.py\n--- a/users/views/muted.py\n+++ b/users/views/muted.py\n@@ -15,6 +15,10 @@\n if user_to.is_curator or user_to.is_moderator:\n raise AccessDenied(title=\"\u0423 \u044d\u0442\u043e\u0433\u043e \u044e\u0437\u0435\u0440\u0430 \u0438\u043c\u043c\u0443\u043d\u0438\u0442\u0435\u0442 \u043e\u0442 \u043c\u044c\u044e\u0442\u0430\")\n \n+ if user_to == request.me:\n+ raise AccessDenied(title='\u0425\u043e\u0440\u043e\u0448\u0430\u044f \u043f\u043e\u043f\u044b\u0442\u043a\u0430, \u043d\u043e \u043c\u044c\u044e\u0442\u0438\u0442\u044c \u0441\u0435\u0431\u044f \u043d\u0435\u043b\u044c\u0437\u044f. \u041a\u043e\u0436\u0430\u043d\u044b\u0439 \u043c\u0435\u0448\u043e\u043a, \u0442\u044b \u043f\u0440\u0435\u043a\u0440\u0430\u0441\u0435\u043d!',\n+ message='')\n+\n total_user_muted_count = Muted.objects.filter(user_from=request.me).count()\n \n # show form on GET\n", "issue": "Bug: \u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u044c \u0437\u0430\u043c\u044c\u044e\u0442\u0438\u0442\u044c \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0435\u0431\u044f\n## \u0427\u0435\u043a\u043b\u0438\u0441\u0442\r\n\r\n- [x] \u042f \u043f\u043e\u0438\u0441\u043a\u0430\u043b \u043f\u043e\u0438\u0441\u043a\u043e\u043c \u043f\u043e \u0442\u0440\u0435\u043a\u0435\u0440\u0443 \u043f\u043e\u0445\u043e\u0436\u0438\u0435 \u043f\u0440\u043e\u0431\u043b\u0435\u043c\u044b, \u0432 \u0442\u043e\u043c \u0447\u0438\u0441\u043b\u0435 \u0432 \u0437\u0430\u043a\u0440\u044b\u0442\u044b\u0445 Issues\r\n- [x] \u0411\u0430\u0433 \u0441\u0442\u0430\u0431\u0438\u043b\u044c\u043d\u043e \u0432\u043e\u0441\u043f\u0440\u043e\u0438\u0437\u0432\u043e\u0434\u0438\u0442\u0441\u044f \u0438 \u044f \u0437\u043d\u0430\u044e \u043a\u0430\u043a \u044d\u0442\u043e \u0441\u0434\u0435\u043b\u0430\u0442\u044c\r\n\r\n## \u041e\u043f\u0438\u0441\u0430\u043d\u0438\u0435 \u0431\u0430\u0433\u0430\r\n\r\n\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u0442\u0435\u0445\u043d\u0438\u0447\u0435\u0441\u043a\u0438 \u043c\u043e\u0436\u0435\u0442 \u0437\u0430\u043c\u044c\u044e\u0438\u0442\u044c \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0435\u0431\u044f, \u043f\u043e\u0441\u043b\u0435 \u0447\u0435\u0433\u043e \u043d\u0435 \u0441\u043c\u043e\u0436\u0435\u0442 \u0443\u0432\u0438\u0434\u0435\u0442\u044c \u0441\u0432\u043e\u0438 \u043f\u043e\u0441\u0442\u044b \u0438 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0438.\r\n\u041e\u0442\u043c\u044c\u044e\u0442\u0438\u0442\u044c \u0441\u0435\u0431\u044f \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u0442\u043e\u0436\u0435 \u043c\u043e\u0436\u0435\u0442 \u0441\u0430\u043c\u043e\u0441\u0442\u043e\u044f\u0442\u0435\u043b\u044c\u043d\u043e (\u044d\u0442\u043e \u0440\u0430\u0431\u043e\u0442\u0430\u0435\u0442 \u0432 \u043e\u0431\u0435 \u0441\u0442\u043e\u0440\u043e\u043d\u044b).\r\n\r\n## \u041e\u0436\u0438\u0434\u0430\u0435\u043c\u044b\u0439 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\r\n\r\n- \u041f\u0440\u0438 \u0432\u044b\u0437\u043e\u0432\u0435 \u043c\u044c\u044e\u0442\u0430 \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0435\u0431\u044f \u0441\u0438\u0441\u0442\u0435\u043c\u0430 \u043f\u043e\u043a\u0430\u0437\u044b\u0432\u0430\u0435\u0442 \u0448\u0430\u0431\u043b\u043e\u043d \u0441 \u0437\u0430\u0433\u043b\u0443\u0448\u043a\u043e\u0439: \u043d\u0435\u043b\u044c\u0437\u044f \u043c\u044c\u044e\u0442\u0438\u0442\u044c \u0441\u0435\u0431\u044f.\r\n- \u041d\u0435\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u044c \u043c\u044c\u044e\u0442\u0430 \u0438 \u0440\u0430\u0437\u043c\u044c\u044e\u0442\u0430 \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0435\u0431\u044f.\r\n\r\n## \u0428\u0430\u0433\u0438 \u043a \u0432\u043e\u0441\u043f\u0440\u043e\u0438\u0437\u0432\u0435\u0434\u0435\u043d\u0438\u044e\r\n\r\n1. **Mute**: \u043f\u0435\u0440\u0435\u0439\u0442\u0438 \u043d\u0430 URL `vas3k.club/user/%USERNAME%/mute/`, \u0433\u0434\u0435 `%USERNAME%` \u2014 \u043f\u0441\u0435\u0432\u0434\u043e\u043d\u0438\u043c \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f (slug) \r\n2. \u0421\u043b\u0435\u0434\u043e\u0432\u0430\u0442\u044c \u043c\u0430\u0441\u0442\u0435\u0440\u0443 \u043c\u044c\u044e\u0442\u0430.\r\n3. **Unmute**: \u043f\u0435\u0440\u0435\u0439\u0442\u0438 \u043d\u0430 URL `vas3k.club/user/%USERNAME%/mute/`, \u0433\u0434\u0435 `%USERNAME%` \u2014 \u043f\u0441\u0435\u0432\u0434\u043e\u043d\u0438\u043c \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f (slug) \r\n4. \u0421\u043b\u0435\u0434\u043e\u0432\u0430\u0442\u044c \u043c\u0430\u0441\u0442\u0435\u0440\u0443 \u043c\u044c\u044e\u0442\u0430.\r\n\r\n\u0421\u043a\u0440\u0438\u043d\u0448\u043e\u0442 \u0441\u043e \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b \u0448\u0430\u0431\u043b\u043e\u043d\u0430 \u043c\u044c\u044e\u0442\u0430 (\u043f\u0440\u043e\u0434\u0430\u043a\u0448\u043d):\r\n![image](https://user-images.githubusercontent.com/94802477/148997272-685359bd-4d61-4f69-9dee-2b6beb6045d3.png)\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, render\n\nfrom auth.helpers import auth_required\nfrom club.exceptions import AccessDenied\nfrom notifications.telegram.users import notify_admin_user_on_mute\nfrom users.models.mute import Muted\nfrom users.models.user import User\n\n\n@auth_required\ndef toggle_mute(request, user_slug):\n user_to = get_object_or_404(User, slug=user_slug)\n if user_to.is_curator or user_to.is_moderator:\n raise AccessDenied(title=\"\u0423 \u044d\u0442\u043e\u0433\u043e \u044e\u0437\u0435\u0440\u0430 \u0438\u043c\u043c\u0443\u043d\u0438\u0442\u0435\u0442 \u043e\u0442 \u043c\u044c\u044e\u0442\u0430\")\n\n total_user_muted_count = Muted.objects.filter(user_from=request.me).count()\n\n # show form on GET\n if request.method != \"POST\":\n is_muted = Muted.is_muted(\n user_from=request.me,\n user_to=user_to,\n )\n if is_muted:\n return render(request, \"users/mute/unmute.html\", {\n \"user\": user_to,\n })\n else:\n return render(request, \"users/mute/mute.html\", {\n \"user\": user_to,\n \"mutes_left\": settings.MAX_MUTE_COUNT - total_user_muted_count,\n })\n\n # else \u2014 process POST\n if total_user_muted_count > settings.MAX_MUTE_COUNT:\n raise AccessDenied(\n title=\"\u0412\u044b \u0437\u0430\u043c\u044c\u044e\u0442\u0438\u043b\u0438 \u0441\u043b\u0438\u0448\u043a\u043e\u043c \u043c\u043d\u043e\u0433\u043e \u043b\u044e\u0434\u0435\u0439\",\n message=\"\u0420\u0435\u043a\u043e\u043c\u0435\u043d\u0434\u0443\u0435\u043c \u043f\u0440\u0438\u0442\u043e\u0440\u043c\u043e\u0437\u0438\u0442\u044c \u0438 \u043f\u043e\u0433\u043e\u0432\u043e\u0440\u0438\u0442\u044c \u0441 \u043a\u0435\u043c-\u043d\u0438\u0431\u0443\u0434\u044c...\"\n )\n\n comment = request.POST.get(\"comment\") or \"\"\n mute, is_created = Muted.mute(\n user_from=request.me,\n user_to=user_to,\n comment=comment,\n )\n\n if is_created:\n # notify admins\n notify_admin_user_on_mute(\n user_from=request.me,\n user_to=user_to,\n comment=comment,\n )\n\n return render(request, \"users/messages/muted.html\", {\n \"user\": user_to,\n })\n else:\n # unmute this user\n Muted.unmute(\n user_from=request.me,\n user_to=user_to,\n )\n\n return render(request, \"users/messages/unmuted.html\", {\n \"user\": user_to,\n })\n\n\n@auth_required\ndef muted(request, user_slug):\n if request.me.slug != user_slug:\n return HttpResponseForbidden()\n\n user = get_object_or_404(User, slug=user_slug)\n muted_users = Muted.muted_by_user(user)\n\n return render(request, \"users/mute/index.html\", {\n \"user\": user,\n \"muted\": muted_users,\n })\n", "path": "users/views/muted.py"}]}
1,646
158
gh_patches_debug_15840
rasdani/github-patches
git_diff
napari__napari-1494
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CTRL-C should exit napari gracefully ## 🐛 Bug After #1476 napari just prints `KeyboardInterrupt` when CTRL-C is pressed in the system terminal window that used to launch napari. Prior to 1476 it exited with a crash, which got the job done but was not great. Ideally napari would exit gracefully when you hit CTRL-C in the system terminal window. ## To Reproduce Steps to reproduce the behavior: 1. From a system terminal (e.g. Terminal program on mac) 2. Run "napari" or a script that uses `napari.gui_qt()` 3. Switch back to the terminal window and type CTRL-C ## Expected behavior Napari exits gracefully. ## Environment ``` napari: not-installed Platform: macOS-10.15.3-x86_64-i386-64bit Python: 3.8.1 (default, Jan 8 2020, 16:15:59) [Clang 4.0.1 (tags/RELEASE_401/final)] Qt: 5.14.2 PyQt5: 5.14.2 NumPy: 1.18.4 SciPy: 1.4.1 Dask: 2.17.2 VisPy: 0.6.5.dev111+g8387ea1a.d20200424 GL version: 2.1 ATI-3.5.5 MAX_TEXTURE_SIZE: 16384 Plugins: - napari-plugin-engine: 0.1.6 - svg: 0.1.3 ``` ## Additional context This is low priority since you can exit with the Quit command, or from the system terminal hit CTRL-Z and `kill %1` the app if necessary. However it seems like exiting gracefully is the right behavior long term. I tried adding this to our new `ExceptionHandler` class: ``` # Interpret CTRL-C as a request to quit. if isinstance(value, KeyboardInterrupt): QApplication.instance().quit() return ``` but while it exits cleanly sometimes, sometimes it bus errors or seg faults. </issue> <code> [start of napari/_qt/exceptions.py] 1 import logging 2 import os 3 import traceback 4 from types import TracebackType 5 from typing import Optional, Type 6 7 from qtpy.QtCore import QObject, Signal 8 9 from .qt_error_notification import NapariNotification 10 11 12 class ExceptionHandler(QObject): 13 """General class to handle all uncaught exceptions in the Qt event loop. 14 15 Parameters 16 ---------- 17 parent : QObject, optional 18 parent object, by default None 19 gui_exceptions : bool, optional 20 Whether to show exceptions as, by default True. May be overriden by 21 environment variable: ``NAPARI_CATCH_ERRORS=1` 22 Note: this ``False`` by default in ``gui_qt()`` (the main 23 instantiator of this class), but it is ``True`` in ``napari.__main__``. 24 As a result, exceptions will be shown in the GUI only (mostly) when 25 running napari as ``napari`` or ``python -m napari`` from the command 26 line. 27 """ 28 29 error = Signal(tuple) 30 message: Optional[NapariNotification] = None 31 32 def __init__(self, parent=None, *, gui_exceptions=True): 33 super().__init__(parent) 34 if os.getenv("NAPARI_CATCH_ERRORS") in ('0', 'False'): 35 self.gui_exceptions = False 36 else: 37 self.gui_exceptions = gui_exceptions 38 39 def handle( 40 self, 41 etype: Type[BaseException], 42 value: BaseException, 43 tb: TracebackType, 44 ): 45 """Our sys.excepthook override. 46 47 This function handles uncaught exceptions and can delegate to a 48 secondary handler, whether it be a GUI dialog, or an IPython traceback 49 printout. The override to ``sys.excepthook`` happens in 50 :func:`napari.gui_qt`, and therefore this is only active when the qt 51 event loop has been started by napari. 52 53 The three parameters here are what would be returned from 54 :func:`sys.exc_info()`. 55 56 Parameters 57 ---------- 58 etype : Type[BaseException] 59 The type of error raised 60 value : BaseException 61 The error instance 62 tb : TracebackType 63 The traceback object associated with the error. 64 """ 65 if self.gui_exceptions: 66 self._show_error_dialog(value) 67 else: 68 text = "".join(traceback.format_exception(etype, value, tb)) 69 logging.error("Unhandled exception:\n%s", text) 70 self.error.emit((etype, value, tb)) 71 72 def _show_error_dialog(self, exception: BaseException): 73 self.message = NapariNotification.from_exception(exception) 74 self.message.show() 75 [end of napari/_qt/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/_qt/exceptions.py b/napari/_qt/exceptions.py --- a/napari/_qt/exceptions.py +++ b/napari/_qt/exceptions.py @@ -1,5 +1,6 @@ import logging import os +import sys import traceback from types import TracebackType from typing import Optional, Type @@ -62,6 +63,12 @@ tb : TracebackType The traceback object associated with the error. """ + # etype.__module__ contains the module raising the error + # Custom exception classes can have different behavior + # can add custom exception handlers here ... + if isinstance(value, KeyboardInterrupt): + print("Closed by KeyboardInterrupt", file=sys.stderr) + sys.exit(1) if self.gui_exceptions: self._show_error_dialog(value) else:
{"golden_diff": "diff --git a/napari/_qt/exceptions.py b/napari/_qt/exceptions.py\n--- a/napari/_qt/exceptions.py\n+++ b/napari/_qt/exceptions.py\n@@ -1,5 +1,6 @@\n import logging\n import os\n+import sys\n import traceback\n from types import TracebackType\n from typing import Optional, Type\n@@ -62,6 +63,12 @@\n tb : TracebackType\n The traceback object associated with the error.\n \"\"\"\n+ # etype.__module__ contains the module raising the error\n+ # Custom exception classes can have different behavior\n+ # can add custom exception handlers here ...\n+ if isinstance(value, KeyboardInterrupt):\n+ print(\"Closed by KeyboardInterrupt\", file=sys.stderr)\n+ sys.exit(1)\n if self.gui_exceptions:\n self._show_error_dialog(value)\n else:\n", "issue": "CTRL-C should exit napari gracefully\n## \ud83d\udc1b Bug\r\n\r\nAfter #1476 napari just prints `KeyboardInterrupt` when CTRL-C is pressed in the system terminal window that used to launch napari. Prior to 1476 it exited with a crash, which got the job done but was not great. \r\n\r\nIdeally napari would exit gracefully when you hit CTRL-C in the system terminal window.\r\n\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. From a system terminal (e.g. Terminal program on mac)\r\n2. Run \"napari\" or a script that uses `napari.gui_qt()`\r\n3. Switch back to the terminal window and type CTRL-C\r\n\r\n## Expected behavior\r\n\r\nNapari exits gracefully.\r\n\r\n## Environment\r\n\r\n```\r\nnapari: not-installed\r\nPlatform: macOS-10.15.3-x86_64-i386-64bit\r\nPython: 3.8.1 (default, Jan 8 2020, 16:15:59) [Clang 4.0.1 (tags/RELEASE_401/final)]\r\nQt: 5.14.2\r\nPyQt5: 5.14.2\r\nNumPy: 1.18.4\r\nSciPy: 1.4.1\r\nDask: 2.17.2\r\nVisPy: 0.6.5.dev111+g8387ea1a.d20200424\r\n\r\nGL version: 2.1 ATI-3.5.5\r\nMAX_TEXTURE_SIZE: 16384\r\n\r\nPlugins:\r\n- napari-plugin-engine: 0.1.6\r\n- svg: 0.1.3\r\n```\r\n\r\n## Additional context\r\n\r\nThis is low priority since you can exit with the Quit command, or from the system terminal hit CTRL-Z and `kill %1` the app if necessary. However it seems like exiting gracefully is the right behavior long term.\r\n\r\nI tried adding this to our new `ExceptionHandler` class:\r\n```\r\n # Interpret CTRL-C as a request to quit.\r\n if isinstance(value, KeyboardInterrupt):\r\n QApplication.instance().quit()\r\n return\r\n```\r\nbut while it exits cleanly sometimes, sometimes it bus errors or seg faults.\n", "before_files": [{"content": "import logging\nimport os\nimport traceback\nfrom types import TracebackType\nfrom typing import Optional, Type\n\nfrom qtpy.QtCore import QObject, Signal\n\nfrom .qt_error_notification import NapariNotification\n\n\nclass ExceptionHandler(QObject):\n \"\"\"General class to handle all uncaught exceptions in the Qt event loop.\n\n Parameters\n ----------\n parent : QObject, optional\n parent object, by default None\n gui_exceptions : bool, optional\n Whether to show exceptions as, by default True. May be overriden by\n environment variable: ``NAPARI_CATCH_ERRORS=1`\n Note: this ``False`` by default in ``gui_qt()`` (the main\n instantiator of this class), but it is ``True`` in ``napari.__main__``.\n As a result, exceptions will be shown in the GUI only (mostly) when\n running napari as ``napari`` or ``python -m napari`` from the command\n line.\n \"\"\"\n\n error = Signal(tuple)\n message: Optional[NapariNotification] = None\n\n def __init__(self, parent=None, *, gui_exceptions=True):\n super().__init__(parent)\n if os.getenv(\"NAPARI_CATCH_ERRORS\") in ('0', 'False'):\n self.gui_exceptions = False\n else:\n self.gui_exceptions = gui_exceptions\n\n def handle(\n self,\n etype: Type[BaseException],\n value: BaseException,\n tb: TracebackType,\n ):\n \"\"\"Our sys.excepthook override.\n\n This function handles uncaught exceptions and can delegate to a\n secondary handler, whether it be a GUI dialog, or an IPython traceback\n printout. The override to ``sys.excepthook`` happens in\n :func:`napari.gui_qt`, and therefore this is only active when the qt\n event loop has been started by napari.\n\n The three parameters here are what would be returned from\n :func:`sys.exc_info()`.\n\n Parameters\n ----------\n etype : Type[BaseException]\n The type of error raised\n value : BaseException\n The error instance\n tb : TracebackType\n The traceback object associated with the error.\n \"\"\"\n if self.gui_exceptions:\n self._show_error_dialog(value)\n else:\n text = \"\".join(traceback.format_exception(etype, value, tb))\n logging.error(\"Unhandled exception:\\n%s\", text)\n self.error.emit((etype, value, tb))\n\n def _show_error_dialog(self, exception: BaseException):\n self.message = NapariNotification.from_exception(exception)\n self.message.show()\n", "path": "napari/_qt/exceptions.py"}]}
1,749
193
gh_patches_debug_10139
rasdani/github-patches
git_diff
zestedesavoir__zds-site-3807
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [beta][v20] L'api des notifications renvoie que les notifs non lues Serveur : Beta Version : v20/6bb2f75 Système : Mac OS X Navigateur : 52.0.2743.116 (64-bit) --- 1. Récupérez vos notifications depuis l'API 2. Constatez que le serveur renvoie uniquement les notifs non lues. </issue> <code> [start of zds/notification/api/views.py] 1 # coding: utf-8 2 from dry_rest_permissions.generics import DRYPermissions 3 from rest_framework import filters 4 from rest_framework.generics import ListAPIView 5 from rest_framework.permissions import IsAuthenticated 6 from rest_framework_extensions.cache.decorators import cache_response 7 from rest_framework_extensions.etag.decorators import etag 8 from rest_framework_extensions.key_constructor import bits 9 from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor 10 11 from zds.api.bits import DJRF3xPaginationKeyBit 12 from zds.notification.api.serializers import NotificationSerializer 13 from zds.notification.models import Notification 14 15 16 class PagingNotificationListKeyConstructor(DefaultKeyConstructor): 17 pagination = DJRF3xPaginationKeyBit() 18 search = bits.QueryParamsKeyBit(['search', 'ordering', 'type']) 19 list_sql_query = bits.ListSqlQueryKeyBit() 20 unique_view_id = bits.UniqueViewIdKeyBit() 21 user = bits.UserKeyBit() 22 23 24 class NotificationListAPI(ListAPIView): 25 """ 26 List of notification. 27 """ 28 29 filter_backends = (filters.SearchFilter, filters.OrderingFilter) 30 search_fields = ('title',) 31 ordering_fields = ('pubdate', 'title',) 32 list_key_func = PagingNotificationListKeyConstructor() 33 serializer_class = NotificationSerializer 34 permission_classes = (IsAuthenticated, DRYPermissions,) 35 36 @etag(list_key_func) 37 @cache_response(key_func=list_key_func) 38 def get(self, request, *args, **kwargs): 39 """ 40 Lists all notifications of a user. 41 --- 42 43 parameters: 44 - name: Authorization 45 description: Bearer token to make an authenticated request. 46 required: true 47 paramType: header 48 - name: page 49 description: Restricts output to the given page number. 50 required: false 51 paramType: query 52 - name: page_size 53 description: Sets the number of notifications per page. 54 required: false 55 paramType: query 56 - name: search 57 description: Filters by title. 58 required: false 59 paramType: query 60 - name: ordering 61 description: Sorts the results. You can order by (-)pubdate or (-)title. 62 paramType: query 63 - name: type 64 description: Filters by notification type. 65 paramType: query 66 - name: subscription_type 67 description: Filters by subscription type. 68 paramType: query 69 - name: expand 70 description: Returns an object instead of an identifier representing the given field. 71 required: false 72 paramType: query 73 responseMessages: 74 - code: 401 75 message: Not Authenticated 76 - code: 404 77 message: Not Found 78 """ 79 return self.list(request, *args, **kwargs) 80 81 def get_queryset(self): 82 queryset = Notification.objects.get_unread_notifications_of(self.request.user) 83 subscription_type = self.request.query_params.get('subscription_type', None) 84 if subscription_type: 85 queryset = queryset.filter(subscription__content_type__model=subscription_type) 86 _type = self.request.query_params.get('type', None) 87 if _type: 88 queryset = queryset.filter(content_type__model=_type) 89 return queryset 90 [end of zds/notification/api/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py --- a/zds/notification/api/views.py +++ b/zds/notification/api/views.py @@ -79,7 +79,7 @@ return self.list(request, *args, **kwargs) def get_queryset(self): - queryset = Notification.objects.get_unread_notifications_of(self.request.user) + queryset = Notification.objects.get_notifications_of(self.request.user) subscription_type = self.request.query_params.get('subscription_type', None) if subscription_type: queryset = queryset.filter(subscription__content_type__model=subscription_type)
{"golden_diff": "diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py\n--- a/zds/notification/api/views.py\n+++ b/zds/notification/api/views.py\n@@ -79,7 +79,7 @@\n return self.list(request, *args, **kwargs)\n \n def get_queryset(self):\n- queryset = Notification.objects.get_unread_notifications_of(self.request.user)\n+ queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get('subscription_type', None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n", "issue": "[beta][v20] L'api des notifications renvoie que les notifs non lues\nServeur : Beta\nVersion : v20/6bb2f75\nSyst\u00e8me : Mac OS X\nNavigateur : 52.0.2743.116 (64-bit)\n\n---\n1. R\u00e9cup\u00e9rez vos notifications depuis l'API\n2. Constatez que le serveur renvoie uniquement les notifs non lues.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit(['search', 'ordering', 'type'])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n\n\nclass NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = (filters.SearchFilter, filters.OrderingFilter)\n search_fields = ('title',)\n ordering_fields = ('pubdate', 'title',)\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (IsAuthenticated, DRYPermissions,)\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - name: ordering\n description: Sorts the results. You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_unread_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get('subscription_type', None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get('type', None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n return queryset\n", "path": "zds/notification/api/views.py"}]}
1,491
132
gh_patches_debug_20469
rasdani/github-patches
git_diff
privacyidea__privacyidea-3091
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bad error handling in /ttype/<type> endpoint We observed a bad error handling when requesting the `/ttype/<type>` endpoint. In specific, we faced the following error: ``` [ERROR][privacyidea.app:1892] Exception on /ttype/push"}. [GET] ... AttributeError: 'NoneType' object has no attribute 'api_endpoint' ``` Actually, we could fix the problem but it would be nice to fix this upstream right away. ### Top-level intent Access the `/ttype/<type>` endpoint. ### Steps to reproduce 1. Query `/ttype/test` endpoint 2. There will be a NoneType error in the logs. ### Expected outcome Proper error handling ### Actual outcome NoneType exception. ### Configuration * **privacyIDEA version**: v3.6.3 * **Installation method**: (from Ubuntu packages, github, PyPI, ...) * **Python version**: 3 * **Operating system**: linux * **Webserver**: apache ### Log file **Set PI_LOGLEVEL = logging.DEBUG in pi.cfg and take a look at the privacyidea.log!** **If appropriate, attach the log file or paste relevant portions.** </issue> <code> [start of privacyidea/api/ttype.py] 1 # -*- coding: utf-8 -*- 2 # 3 # http://www.privacyidea.org 4 # (c) Cornelius Kölbel, privacyidea.org 5 # 6 # 2015-09-01 Cornelius Kölbel, <[email protected]> 7 # Initial writeup 8 # 9 # This code is free software; you can redistribute it and/or 10 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE 11 # License as published by the Free Software Foundation; either 12 # version 3 of the License, or any later version. 13 # 14 # This code is distributed in the hope that it will be useful, 15 # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 # GNU AFFERO GENERAL PUBLIC LICENSE for more details. 18 # 19 # You should have received a copy of the GNU Affero General Public 20 # License along with this program. If not, see <http://www.gnu.org/licenses/>. 21 # 22 """ 23 This API endpoint is a generic endpoint that can be used by any token 24 type. 25 26 The tokentype needs to implement a classmethod *api_endpoint* and can then be 27 called by /ttype/<tokentype>. 28 This way, each tokentype can create its own API without the need to change 29 the core API. 30 31 The TiQR Token uses this API to implement its special functionalities. See 32 :ref:`code_tiqr_token`. 33 """ 34 from flask import (Blueprint, 35 request) 36 from .lib.utils import getParam 37 from ..lib.log import log_with 38 from flask import g, jsonify, current_app 39 import logging 40 from privacyidea.api.lib.utils import get_all_params 41 from privacyidea.lib.policy import PolicyClass 42 from privacyidea.lib.audit import getAudit 43 from privacyidea.lib.config import (get_token_class, get_from_config, 44 SYSCONF, ensure_no_config_object) 45 from privacyidea.lib.user import get_user_from_param 46 from privacyidea.lib.utils import get_client_ip 47 import json 48 49 log = logging.getLogger(__name__) 50 51 ttype_blueprint = Blueprint('ttype_blueprint', __name__) 52 53 54 @ttype_blueprint.before_request 55 def before_request(): 56 """ 57 This is executed before the request 58 """ 59 ensure_no_config_object() 60 request.all_data = get_all_params(request) 61 privacyidea_server = current_app.config.get("PI_AUDIT_SERVERNAME") or \ 62 request.host 63 # Create a policy_object, that reads the database audit settings 64 # and contains the complete policy definition during the request. 65 # This audit_object can be used in the postpolicy and prepolicy and it 66 # can be passed to the innerpolicies. 67 g.policy_object = PolicyClass() 68 g.audit_object = getAudit(current_app.config) 69 # access_route contains the ip adresses of all clients, hops and proxies. 70 g.client_ip = get_client_ip(request, 71 get_from_config(SYSCONF.OVERRIDECLIENT)) 72 g.serial = getParam(request.all_data, "serial") or None 73 g.audit_object.log({"success": False, 74 "action_detail": "", 75 "client": g.client_ip, 76 "client_user_agent": request.user_agent.browser, 77 "privacyidea_server": privacyidea_server, 78 "action": "{0!s} {1!s}".format(request.method, request.url_rule), 79 "info": ""}) 80 81 82 @ttype_blueprint.route('/<ttype>', methods=['POST', 'GET']) 83 @log_with(log) 84 def token(ttype=None): 85 """ 86 This is a special token function. Each token type can define an 87 additional API call, that does not need authentication on the REST API 88 level. 89 90 :return: Token Type dependent 91 """ 92 tokenc = get_token_class(ttype) 93 res = tokenc.api_endpoint(request, g) 94 serial = getParam(request.all_data, "serial") 95 user = get_user_from_param(request.all_data) 96 g.audit_object.log({"success": 1, 97 "user": user.login, 98 "realm": user.realm, 99 "serial": serial, 100 "token_type": ttype}) 101 if res[0] == "json": 102 return jsonify(res[1]) 103 elif res[0] in ["html", "plain"]: 104 return current_app.response_class(res[1], mimetype="text/{0!s}".format(res[0])) 105 elif len(res) == 2: 106 return current_app.response_class(json.dumps(res[1]), 107 mimetype="application/{0!s}".format(res[0])) 108 else: 109 return current_app.response_class(res[1], mimetype="application/octet-binary", 110 headers=res[2]) 111 [end of privacyidea/api/ttype.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/privacyidea/api/ttype.py b/privacyidea/api/ttype.py --- a/privacyidea/api/ttype.py +++ b/privacyidea/api/ttype.py @@ -38,6 +38,7 @@ from flask import g, jsonify, current_app import logging from privacyidea.api.lib.utils import get_all_params +from privacyidea.lib.error import ParameterError from privacyidea.lib.policy import PolicyClass from privacyidea.lib.audit import getAudit from privacyidea.lib.config import (get_token_class, get_from_config, @@ -90,6 +91,9 @@ :return: Token Type dependent """ tokenc = get_token_class(ttype) + if tokenc is None: + log.error(u"Invalid tokentype provided. ttype: {}".format(ttype.lower())) + raise ParameterError(u"Invalid tokentype provided. ttype: {}".format(ttype.lower())) res = tokenc.api_endpoint(request, g) serial = getParam(request.all_data, "serial") user = get_user_from_param(request.all_data)
{"golden_diff": "diff --git a/privacyidea/api/ttype.py b/privacyidea/api/ttype.py\n--- a/privacyidea/api/ttype.py\n+++ b/privacyidea/api/ttype.py\n@@ -38,6 +38,7 @@\n from flask import g, jsonify, current_app\n import logging\n from privacyidea.api.lib.utils import get_all_params\n+from privacyidea.lib.error import ParameterError\n from privacyidea.lib.policy import PolicyClass\n from privacyidea.lib.audit import getAudit\n from privacyidea.lib.config import (get_token_class, get_from_config,\n@@ -90,6 +91,9 @@\n :return: Token Type dependent\n \"\"\"\n tokenc = get_token_class(ttype)\n+ if tokenc is None:\n+ log.error(u\"Invalid tokentype provided. ttype: {}\".format(ttype.lower()))\n+ raise ParameterError(u\"Invalid tokentype provided. ttype: {}\".format(ttype.lower()))\n res = tokenc.api_endpoint(request, g)\n serial = getParam(request.all_data, \"serial\")\n user = get_user_from_param(request.all_data)\n", "issue": "Bad error handling in /ttype/<type> endpoint\nWe observed a bad error handling when requesting the `/ttype/<type>` endpoint.\r\n\r\nIn specific, we faced the following error:\r\n```\r\n[ERROR][privacyidea.app:1892] Exception on /ttype/push\"}. [GET]\r\n...\r\nAttributeError: 'NoneType' object has no attribute 'api_endpoint'\r\n```\r\nActually, we could fix the problem but it would be nice to fix this upstream right away.\r\n\r\n### Top-level intent\r\n\r\nAccess the `/ttype/<type>` endpoint.\r\n\r\n### Steps to reproduce\r\n\r\n1. Query `/ttype/test` endpoint\r\n2. There will be a NoneType error in the logs.\r\n\r\n### Expected outcome\r\n\r\nProper error handling\r\n\r\n### Actual outcome\r\n\r\nNoneType exception.\r\n\r\n### Configuration\r\n\r\n* **privacyIDEA version**: v3.6.3\r\n* **Installation method**: (from Ubuntu packages, github, PyPI, ...)\r\n* **Python version**: 3\r\n* **Operating system**: linux\r\n* **Webserver**: apache\r\n\r\n\r\n### Log file\r\n\r\n**Set PI_LOGLEVEL = logging.DEBUG in pi.cfg and take a look at the privacyidea.log!**\r\n**If appropriate, attach the log file or paste relevant portions.**\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# http://www.privacyidea.org\n# (c) Cornelius K\u00f6lbel, privacyidea.org\n#\n# 2015-09-01 Cornelius K\u00f6lbel, <[email protected]>\n# Initial writeup\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis API endpoint is a generic endpoint that can be used by any token\ntype.\n\nThe tokentype needs to implement a classmethod *api_endpoint* and can then be\ncalled by /ttype/<tokentype>.\nThis way, each tokentype can create its own API without the need to change\nthe core API.\n\nThe TiQR Token uses this API to implement its special functionalities. See\n:ref:`code_tiqr_token`.\n\"\"\"\nfrom flask import (Blueprint,\n request)\nfrom .lib.utils import getParam\nfrom ..lib.log import log_with\nfrom flask import g, jsonify, current_app\nimport logging\nfrom privacyidea.api.lib.utils import get_all_params\nfrom privacyidea.lib.policy import PolicyClass\nfrom privacyidea.lib.audit import getAudit\nfrom privacyidea.lib.config import (get_token_class, get_from_config,\n SYSCONF, ensure_no_config_object)\nfrom privacyidea.lib.user import get_user_from_param\nfrom privacyidea.lib.utils import get_client_ip\nimport json\n\nlog = logging.getLogger(__name__)\n\nttype_blueprint = Blueprint('ttype_blueprint', __name__)\n\n\n@ttype_blueprint.before_request\ndef before_request():\n \"\"\"\n This is executed before the request\n \"\"\"\n ensure_no_config_object()\n request.all_data = get_all_params(request)\n privacyidea_server = current_app.config.get(\"PI_AUDIT_SERVERNAME\") or \\\n request.host\n # Create a policy_object, that reads the database audit settings\n # and contains the complete policy definition during the request.\n # This audit_object can be used in the postpolicy and prepolicy and it\n # can be passed to the innerpolicies.\n g.policy_object = PolicyClass()\n g.audit_object = getAudit(current_app.config)\n # access_route contains the ip adresses of all clients, hops and proxies.\n g.client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n g.serial = getParam(request.all_data, \"serial\") or None\n g.audit_object.log({\"success\": False,\n \"action_detail\": \"\",\n \"client\": g.client_ip,\n \"client_user_agent\": request.user_agent.browser,\n \"privacyidea_server\": privacyidea_server,\n \"action\": \"{0!s} {1!s}\".format(request.method, request.url_rule),\n \"info\": \"\"})\n\n\n@ttype_blueprint.route('/<ttype>', methods=['POST', 'GET'])\n@log_with(log)\ndef token(ttype=None):\n \"\"\"\n This is a special token function. Each token type can define an\n additional API call, that does not need authentication on the REST API\n level.\n\n :return: Token Type dependent\n \"\"\"\n tokenc = get_token_class(ttype)\n res = tokenc.api_endpoint(request, g)\n serial = getParam(request.all_data, \"serial\")\n user = get_user_from_param(request.all_data)\n g.audit_object.log({\"success\": 1,\n \"user\": user.login,\n \"realm\": user.realm,\n \"serial\": serial,\n \"token_type\": ttype})\n if res[0] == \"json\":\n return jsonify(res[1])\n elif res[0] in [\"html\", \"plain\"]:\n return current_app.response_class(res[1], mimetype=\"text/{0!s}\".format(res[0]))\n elif len(res) == 2:\n return current_app.response_class(json.dumps(res[1]),\n mimetype=\"application/{0!s}\".format(res[0]))\n else:\n return current_app.response_class(res[1], mimetype=\"application/octet-binary\",\n headers=res[2])\n", "path": "privacyidea/api/ttype.py"}]}
2,007
238
gh_patches_debug_790
rasdani/github-patches
git_diff
ibis-project__ibis-8364
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bug: `Scalar.isin(Column)` returns a Column, not a Scalar ### What happened? ```python import ibis needle = ibis.literal(2) haystack = ibis.memtable({"x": [1, 2, 3]}).x type(needle.isin(haystack)) # ibis.expr.types.logical.BooleanColumn ``` ### What version of ibis are you using? main ### What backend(s) are you using, if any? _No response_ ### Relevant log output _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct </issue> <code> [start of ibis/expr/operations/subqueries.py] 1 from __future__ import annotations 2 3 from public import public 4 5 import ibis.expr.datashape as ds 6 import ibis.expr.datatypes as dt 7 import ibis.expr.rules as rlz 8 from ibis.common.annotations import attribute 9 from ibis.common.exceptions import IntegrityError 10 from ibis.expr.operations.core import Value 11 from ibis.expr.operations.relations import Relation # noqa: TCH001 12 13 14 @public 15 class Subquery(Value): 16 rel: Relation 17 18 @attribute 19 def relations(self): 20 return frozenset() 21 22 23 @public 24 class ExistsSubquery(Subquery): 25 dtype = dt.boolean 26 shape = ds.columnar 27 28 29 @public 30 class ScalarSubquery(Subquery): 31 shape = ds.scalar 32 33 def __init__(self, rel): 34 if len(rel.schema) != 1: 35 raise IntegrityError( 36 "Relation passed to ScalarSubquery() must have exactly one " 37 f"column, got {len(rel.schema)}" 38 ) 39 super().__init__(rel=rel) 40 41 @attribute 42 def value(self): 43 (value,) = self.rel.values.values() 44 return value 45 46 @attribute 47 def dtype(self): 48 return self.value.dtype 49 50 51 @public 52 class InSubquery(Subquery): 53 needle: Value 54 55 dtype = dt.boolean 56 shape = ds.columnar 57 58 def __init__(self, rel, needle): 59 if len(rel.schema) != 1: 60 raise IntegrityError( 61 "Relation passed to InSubquery() must have exactly one " 62 f"column, got {len(rel.schema)}" 63 ) 64 (value,) = rel.values.values() 65 if not rlz.comparable(value, needle): 66 raise IntegrityError(f"{needle!r} is not comparable to {value!r}") 67 super().__init__(rel=rel, needle=needle) 68 69 @attribute 70 def value(self): 71 (value,) = self.rel.values.values() 72 return value 73 74 @attribute 75 def relations(self): 76 return self.needle.relations 77 [end of ibis/expr/operations/subqueries.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ibis/expr/operations/subqueries.py b/ibis/expr/operations/subqueries.py --- a/ibis/expr/operations/subqueries.py +++ b/ibis/expr/operations/subqueries.py @@ -53,7 +53,7 @@ needle: Value dtype = dt.boolean - shape = ds.columnar + shape = rlz.shape_like("needle") def __init__(self, rel, needle): if len(rel.schema) != 1:
{"golden_diff": "diff --git a/ibis/expr/operations/subqueries.py b/ibis/expr/operations/subqueries.py\n--- a/ibis/expr/operations/subqueries.py\n+++ b/ibis/expr/operations/subqueries.py\n@@ -53,7 +53,7 @@\n needle: Value\n \n dtype = dt.boolean\n- shape = ds.columnar\n+ shape = rlz.shape_like(\"needle\")\n \n def __init__(self, rel, needle):\n if len(rel.schema) != 1:\n", "issue": "bug: `Scalar.isin(Column)` returns a Column, not a Scalar\n### What happened?\n\n```python\r\nimport ibis\r\n\r\nneedle = ibis.literal(2)\r\nhaystack = ibis.memtable({\"x\": [1, 2, 3]}).x\r\ntype(needle.isin(haystack))\r\n# ibis.expr.types.logical.BooleanColumn\r\n```\n\n### What version of ibis are you using?\n\nmain\n\n### What backend(s) are you using, if any?\n\n_No response_\n\n### Relevant log output\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom public import public\n\nimport ibis.expr.datashape as ds\nimport ibis.expr.datatypes as dt\nimport ibis.expr.rules as rlz\nfrom ibis.common.annotations import attribute\nfrom ibis.common.exceptions import IntegrityError\nfrom ibis.expr.operations.core import Value\nfrom ibis.expr.operations.relations import Relation # noqa: TCH001\n\n\n@public\nclass Subquery(Value):\n rel: Relation\n\n @attribute\n def relations(self):\n return frozenset()\n\n\n@public\nclass ExistsSubquery(Subquery):\n dtype = dt.boolean\n shape = ds.columnar\n\n\n@public\nclass ScalarSubquery(Subquery):\n shape = ds.scalar\n\n def __init__(self, rel):\n if len(rel.schema) != 1:\n raise IntegrityError(\n \"Relation passed to ScalarSubquery() must have exactly one \"\n f\"column, got {len(rel.schema)}\"\n )\n super().__init__(rel=rel)\n\n @attribute\n def value(self):\n (value,) = self.rel.values.values()\n return value\n\n @attribute\n def dtype(self):\n return self.value.dtype\n\n\n@public\nclass InSubquery(Subquery):\n needle: Value\n\n dtype = dt.boolean\n shape = ds.columnar\n\n def __init__(self, rel, needle):\n if len(rel.schema) != 1:\n raise IntegrityError(\n \"Relation passed to InSubquery() must have exactly one \"\n f\"column, got {len(rel.schema)}\"\n )\n (value,) = rel.values.values()\n if not rlz.comparable(value, needle):\n raise IntegrityError(f\"{needle!r} is not comparable to {value!r}\")\n super().__init__(rel=rel, needle=needle)\n\n @attribute\n def value(self):\n (value,) = self.rel.values.values()\n return value\n\n @attribute\n def relations(self):\n return self.needle.relations\n", "path": "ibis/expr/operations/subqueries.py"}]}
1,262
117
gh_patches_debug_22414
rasdani/github-patches
git_diff
translate__pootle-6485
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Languages in languages drop down menu are messed Hi, the languages in the languages drop down menu are in a pretty mess now. It seems that they are not sorted anymore now, neither by language name nor by locale. Regards, Michael </issue> <code> [start of pootle/core/views/base.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 from django.urls import reverse 10 from django.utils.decorators import method_decorator 11 from django.utils.functional import cached_property 12 from django.utils.translation import get_language 13 from django.views.decorators.cache import never_cache 14 from django.views.generic import DetailView 15 16 from pootle.core.delegate import site_languages 17 from pootle.core.url_helpers import get_path_parts 18 from pootle.i18n.gettext import ugettext as _ 19 from pootle_app.models.permissions import check_permission 20 from pootle_misc.util import ajax_required 21 22 from .decorators import requires_permission, set_permissions 23 from .mixins import GatherContextMixin, PootleJSONMixin 24 25 26 class PootleDetailView(GatherContextMixin, DetailView): 27 translate_url_path = "" 28 browse_url_path = "" 29 resource_path = "" 30 view_name = "" 31 sw_version = 0 32 ns = "pootle.core" 33 34 @property 35 def browse_url(self): 36 return reverse( 37 self.browse_url_path, 38 kwargs=self.url_kwargs) 39 40 @property 41 def cache_key(self): 42 return ( 43 "%s.%s.%s.%s" 44 % (self.page_name, 45 self.view_name, 46 self.object.data_tool.cache_key, 47 self.request_lang)) 48 49 @property 50 def request_lang(self): 51 return get_language() 52 53 @cached_property 54 def has_admin_access(self): 55 return check_permission('administrate', self.request) 56 57 @property 58 def language(self): 59 if self.tp: 60 return self.tp.language 61 62 @property 63 def permission_context(self): 64 return self.get_object() 65 66 @property 67 def pootle_path(self): 68 return self.object.pootle_path 69 70 @property 71 def project(self): 72 if self.tp: 73 return self.tp.project 74 75 @property 76 def tp(self): 77 return None 78 79 @property 80 def translate_url(self): 81 return reverse( 82 self.translate_url_path, 83 kwargs=self.url_kwargs) 84 85 @set_permissions 86 @requires_permission("view") 87 def dispatch(self, request, *args, **kwargs): 88 # get funky with the request 8/ 89 return super(PootleDetailView, self).dispatch(request, *args, **kwargs) 90 91 @property 92 def languages(self): 93 languages = site_languages.get() 94 return ( 95 languages.all_languages 96 if self.has_admin_access 97 else languages.languages) 98 99 def get_context_data(self, *args, **kwargs): 100 return { 101 'object': self.object, 102 'pootle_path': self.pootle_path, 103 'project': self.project, 104 'language': self.language, 105 "all_languages": self.languages, 106 'translation_project': self.tp, 107 'has_admin_access': self.has_admin_access, 108 'resource_path': self.resource_path, 109 'resource_path_parts': get_path_parts(self.resource_path), 110 'translate_url': self.translate_url, 111 'browse_url': self.browse_url, 112 'paths_placeholder': _("Entire Project"), 113 'unit_api_root': "/xhr/units/"} 114 115 116 class PootleJSON(PootleJSONMixin, PootleDetailView): 117 118 @never_cache 119 @method_decorator(ajax_required) 120 @set_permissions 121 @requires_permission("view") 122 def dispatch(self, request, *args, **kwargs): 123 return super(PootleJSON, self).dispatch(request, *args, **kwargs) 124 125 126 class PootleAdminView(DetailView): 127 128 @set_permissions 129 @requires_permission("administrate") 130 def dispatch(self, request, *args, **kwargs): 131 return super(PootleAdminView, self).dispatch(request, *args, **kwargs) 132 133 @property 134 def permission_context(self): 135 return self.get_object().directory 136 137 def post(self, *args, **kwargs): 138 return self.get(*args, **kwargs) 139 [end of pootle/core/views/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/core/views/base.py b/pootle/core/views/base.py --- a/pootle/core/views/base.py +++ b/pootle/core/views/base.py @@ -6,6 +6,8 @@ # or later license. See the LICENSE file for a copy of the license and the # AUTHORS file for copyright and authorship information. +from collections import OrderedDict + from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.functional import cached_property @@ -91,10 +93,18 @@ @property def languages(self): languages = site_languages.get() - return ( + languages = ( languages.all_languages if self.has_admin_access else languages.languages) + lang_map = { + v: k + for k, v + in languages.items()} + return OrderedDict( + (lang_map[v], v) + for v + in sorted(languages.values())) def get_context_data(self, *args, **kwargs): return {
{"golden_diff": "diff --git a/pootle/core/views/base.py b/pootle/core/views/base.py\n--- a/pootle/core/views/base.py\n+++ b/pootle/core/views/base.py\n@@ -6,6 +6,8 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+from collections import OrderedDict\n+\n from django.urls import reverse\n from django.utils.decorators import method_decorator\n from django.utils.functional import cached_property\n@@ -91,10 +93,18 @@\n @property\n def languages(self):\n languages = site_languages.get()\n- return (\n+ languages = (\n languages.all_languages\n if self.has_admin_access\n else languages.languages)\n+ lang_map = {\n+ v: k\n+ for k, v\n+ in languages.items()}\n+ return OrderedDict(\n+ (lang_map[v], v)\n+ for v\n+ in sorted(languages.values()))\n \n def get_context_data(self, *args, **kwargs):\n return {\n", "issue": "Languages in languages drop down menu are messed\nHi,\r\n\r\nthe languages in the languages drop down menu are in a pretty mess now. It seems that they are not sorted anymore now, neither by language name nor by locale.\r\n\r\nRegards,\r\nMichael\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import get_language\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import DetailView\n\nfrom pootle.core.delegate import site_languages\nfrom pootle.core.url_helpers import get_path_parts\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_app.models.permissions import check_permission\nfrom pootle_misc.util import ajax_required\n\nfrom .decorators import requires_permission, set_permissions\nfrom .mixins import GatherContextMixin, PootleJSONMixin\n\n\nclass PootleDetailView(GatherContextMixin, DetailView):\n translate_url_path = \"\"\n browse_url_path = \"\"\n resource_path = \"\"\n view_name = \"\"\n sw_version = 0\n ns = \"pootle.core\"\n\n @property\n def browse_url(self):\n return reverse(\n self.browse_url_path,\n kwargs=self.url_kwargs)\n\n @property\n def cache_key(self):\n return (\n \"%s.%s.%s.%s\"\n % (self.page_name,\n self.view_name,\n self.object.data_tool.cache_key,\n self.request_lang))\n\n @property\n def request_lang(self):\n return get_language()\n\n @cached_property\n def has_admin_access(self):\n return check_permission('administrate', self.request)\n\n @property\n def language(self):\n if self.tp:\n return self.tp.language\n\n @property\n def permission_context(self):\n return self.get_object()\n\n @property\n def pootle_path(self):\n return self.object.pootle_path\n\n @property\n def project(self):\n if self.tp:\n return self.tp.project\n\n @property\n def tp(self):\n return None\n\n @property\n def translate_url(self):\n return reverse(\n self.translate_url_path,\n kwargs=self.url_kwargs)\n\n @set_permissions\n @requires_permission(\"view\")\n def dispatch(self, request, *args, **kwargs):\n # get funky with the request 8/\n return super(PootleDetailView, self).dispatch(request, *args, **kwargs)\n\n @property\n def languages(self):\n languages = site_languages.get()\n return (\n languages.all_languages\n if self.has_admin_access\n else languages.languages)\n\n def get_context_data(self, *args, **kwargs):\n return {\n 'object': self.object,\n 'pootle_path': self.pootle_path,\n 'project': self.project,\n 'language': self.language,\n \"all_languages\": self.languages,\n 'translation_project': self.tp,\n 'has_admin_access': self.has_admin_access,\n 'resource_path': self.resource_path,\n 'resource_path_parts': get_path_parts(self.resource_path),\n 'translate_url': self.translate_url,\n 'browse_url': self.browse_url,\n 'paths_placeholder': _(\"Entire Project\"),\n 'unit_api_root': \"/xhr/units/\"}\n\n\nclass PootleJSON(PootleJSONMixin, PootleDetailView):\n\n @never_cache\n @method_decorator(ajax_required)\n @set_permissions\n @requires_permission(\"view\")\n def dispatch(self, request, *args, **kwargs):\n return super(PootleJSON, self).dispatch(request, *args, **kwargs)\n\n\nclass PootleAdminView(DetailView):\n\n @set_permissions\n @requires_permission(\"administrate\")\n def dispatch(self, request, *args, **kwargs):\n return super(PootleAdminView, self).dispatch(request, *args, **kwargs)\n\n @property\n def permission_context(self):\n return self.get_object().directory\n\n def post(self, *args, **kwargs):\n return self.get(*args, **kwargs)\n", "path": "pootle/core/views/base.py"}]}
1,800
234
gh_patches_debug_27678
rasdani/github-patches
git_diff
mozilla__bugbug-854
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add an option to the bug_classifier script to download the model when it doesn't exist </issue> <code> [start of scripts/bug_classifier.py] 1 # -*- coding: utf-8 -*- 2 3 import argparse 4 import os 5 6 import numpy as np 7 8 from bugbug import bugzilla 9 from bugbug.models import get_model_class 10 11 MODELS_WITH_TYPE = ("component",) 12 13 14 def classify_bugs(model_name, classifier): 15 if classifier != "default": 16 assert ( 17 model_name in MODELS_WITH_TYPE 18 ), f"{classifier} is not a valid classifier type for {model_name}" 19 20 model_file_name = f"{model_name}{classifier}model" 21 model_name = f"{model_name}_{classifier}" 22 else: 23 model_file_name = f"{model_name}model" 24 25 assert os.path.exists( 26 model_file_name 27 ), f"{model_file_name} does not exist. Train the model with trainer.py first." 28 29 model_class = get_model_class(model_name) 30 model = model_class.load(model_file_name) 31 32 for bug in bugzilla.get_bugs(): 33 print( 34 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]} - {bug["summary"]} ' 35 ) 36 37 if model.calculate_importance: 38 probas, importance = model.classify( 39 bug, probabilities=True, importances=True 40 ) 41 42 feature_names = model.get_human_readable_feature_names() 43 44 model.print_feature_importances( 45 importance["importances"], feature_names, class_probabilities=probas 46 ) 47 else: 48 probas = model.classify(bug, probabilities=True, importances=False) 49 50 if np.argmax(probas) == 1: 51 print(f"Positive! {probas}") 52 else: 53 print(f"Negative! {probas}") 54 input() 55 56 57 def main(): 58 description = "Perform evaluation on bugs using the specified model" 59 parser = argparse.ArgumentParser(description=description) 60 61 parser.add_argument("model", help="Which model to use for evaluation") 62 parser.add_argument( 63 "--classifier", 64 help="Type of the classifier. Only used for component classification.", 65 choices=["default", "nn"], 66 default="default", 67 ) 68 69 args = parser.parse_args() 70 71 classify_bugs(args.model, args.classifier) 72 [end of scripts/bug_classifier.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py --- a/scripts/bug_classifier.py +++ b/scripts/bug_classifier.py @@ -2,14 +2,20 @@ import argparse import os +from logging import INFO, basicConfig, getLogger import numpy as np +import requests from bugbug import bugzilla from bugbug.models import get_model_class +from bugbug.utils import download_check_etag, zstd_decompress MODELS_WITH_TYPE = ("component",) +basicConfig(level=INFO) +logger = getLogger(__name__) + def classify_bugs(model_name, classifier): if classifier != "default": @@ -22,9 +28,21 @@ else: model_file_name = f"{model_name}model" - assert os.path.exists( - model_file_name - ), f"{model_file_name} does not exist. Train the model with trainer.py first." + if not os.path.exists(model_file_name): + logger.info(f"{model_file_name} does not exist. Downloading the model....") + try: + download_check_etag( + f"https://index.taskcluster.net/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst", + f"{model_file_name}.zst", + ) + except requests.HTTPError: + logger.error( + f"A pre-trained model is not available, you will need to train it yourself using the trainer script" + ) + raise SystemExit(1) + + zstd_decompress(model_file_name) + assert os.path.exists(model_file_name), "Decompressed file doesn't exist" model_class = get_model_class(model_name) model = model_class.load(model_file_name)
{"golden_diff": "diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py\n--- a/scripts/bug_classifier.py\n+++ b/scripts/bug_classifier.py\n@@ -2,14 +2,20 @@\n \n import argparse\n import os\n+from logging import INFO, basicConfig, getLogger\n \n import numpy as np\n+import requests\n \n from bugbug import bugzilla\n from bugbug.models import get_model_class\n+from bugbug.utils import download_check_etag, zstd_decompress\n \n MODELS_WITH_TYPE = (\"component\",)\n \n+basicConfig(level=INFO)\n+logger = getLogger(__name__)\n+\n \n def classify_bugs(model_name, classifier):\n if classifier != \"default\":\n@@ -22,9 +28,21 @@\n else:\n model_file_name = f\"{model_name}model\"\n \n- assert os.path.exists(\n- model_file_name\n- ), f\"{model_file_name} does not exist. Train the model with trainer.py first.\"\n+ if not os.path.exists(model_file_name):\n+ logger.info(f\"{model_file_name} does not exist. Downloading the model....\")\n+ try:\n+ download_check_etag(\n+ f\"https://index.taskcluster.net/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst\",\n+ f\"{model_file_name}.zst\",\n+ )\n+ except requests.HTTPError:\n+ logger.error(\n+ f\"A pre-trained model is not available, you will need to train it yourself using the trainer script\"\n+ )\n+ raise SystemExit(1)\n+\n+ zstd_decompress(model_file_name)\n+ assert os.path.exists(model_file_name), \"Decompressed file doesn't exist\"\n \n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n", "issue": "Add an option to the bug_classifier script to download the model when it doesn't exist\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\n\nimport numpy as np\n\nfrom bugbug import bugzilla\nfrom bugbug.models import get_model_class\n\nMODELS_WITH_TYPE = (\"component\",)\n\n\ndef classify_bugs(model_name, classifier):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n ), f\"{classifier} is not a valid classifier type for {model_name}\"\n\n model_file_name = f\"{model_name}{classifier}model\"\n model_name = f\"{model_name}_{classifier}\"\n else:\n model_file_name = f\"{model_name}model\"\n\n assert os.path.exists(\n model_file_name\n ), f\"{model_file_name} does not exist. Train the model with trainer.py first.\"\n\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_human_readable_feature_names()\n\n model.print_feature_importances(\n importance[\"importances\"], feature_names, class_probabilities=probas\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! {probas}\")\n input()\n\n\ndef main():\n description = \"Perform evaluation on bugs using the specified model\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to use for evaluation\")\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n\n args = parser.parse_args()\n\n classify_bugs(args.model, args.classifier)\n", "path": "scripts/bug_classifier.py"}]}
1,146
397
gh_patches_debug_8135
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-1047
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Show land module in left menus As we said it would be interesting to move it to its own menu, we should take opportunity to rename some elements : Module name : "Gestion foncière" TO "Gestionnaires" AND ALSO : "Zone de compétence" TO "Compétence sentiers" </issue> <code> [start of geotrek/land/urls.py] 1 from mapentity import registry 2 3 from . import models 4 5 6 urlpatterns = registry.register(models.PhysicalEdge, menu=False) 7 urlpatterns += registry.register(models.LandEdge, menu=False) 8 urlpatterns += registry.register(models.CompetenceEdge, menu=False) 9 urlpatterns += registry.register(models.WorkManagementEdge, menu=False) 10 urlpatterns += registry.register(models.SignageManagementEdge, menu=False) 11 [end of geotrek/land/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geotrek/land/urls.py b/geotrek/land/urls.py --- a/geotrek/land/urls.py +++ b/geotrek/land/urls.py @@ -4,7 +4,7 @@ urlpatterns = registry.register(models.PhysicalEdge, menu=False) -urlpatterns += registry.register(models.LandEdge, menu=False) +urlpatterns += registry.register(models.LandEdge) urlpatterns += registry.register(models.CompetenceEdge, menu=False) urlpatterns += registry.register(models.WorkManagementEdge, menu=False) urlpatterns += registry.register(models.SignageManagementEdge, menu=False)
{"golden_diff": "diff --git a/geotrek/land/urls.py b/geotrek/land/urls.py\n--- a/geotrek/land/urls.py\n+++ b/geotrek/land/urls.py\n@@ -4,7 +4,7 @@\n \n \n urlpatterns = registry.register(models.PhysicalEdge, menu=False)\n-urlpatterns += registry.register(models.LandEdge, menu=False)\n+urlpatterns += registry.register(models.LandEdge)\n urlpatterns += registry.register(models.CompetenceEdge, menu=False)\n urlpatterns += registry.register(models.WorkManagementEdge, menu=False)\n urlpatterns += registry.register(models.SignageManagementEdge, menu=False)\n", "issue": "Show land module in left menus\nAs we said it would be interesting to move it to its own menu, we should take opportunity to rename some elements : \n\nModule name : \n\"Gestion fonci\u00e8re\" TO \"Gestionnaires\"\nAND ALSO : \n\"Zone de comp\u00e9tence\" TO \"Comp\u00e9tence sentiers\"\n\n", "before_files": [{"content": "from mapentity import registry\n\nfrom . import models\n\n\nurlpatterns = registry.register(models.PhysicalEdge, menu=False)\nurlpatterns += registry.register(models.LandEdge, menu=False)\nurlpatterns += registry.register(models.CompetenceEdge, menu=False)\nurlpatterns += registry.register(models.WorkManagementEdge, menu=False)\nurlpatterns += registry.register(models.SignageManagementEdge, menu=False)\n", "path": "geotrek/land/urls.py"}]}
697
129
gh_patches_debug_23375
rasdani/github-patches
git_diff
pypa__setuptools-2863
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [FR] Add integration tests to catch breaking changes in the API ### What's the problem this feature will solve? It would be nice to have integration tests focusing on the usage of setuptools "public API" by some popular packages in the community. This way we can catch breaking changes in the API before publishing new releases ### Describe the solution you'd like According to the discussion in https://github.com/pypa/setuptools/pull/2844, if adding a new "integration test suite", the following characteristics are desirable: 1. It should run separated from the main test suite (integration tests are resource intensive and time consuming, so the best is to avoid always running them and postponing until a new release is ready). 2. It should test how setuptools' API is being used by popular packages in the community to catch rare errors. ### Alternative Solutions _No response_ ### Additional context _No response_ ### Code of Conduct - [X] I agree to follow the PSF Code of Conduct </issue> <code> [start of conftest.py] 1 import sys 2 3 4 pytest_plugins = 'setuptools.tests.fixtures' 5 6 7 def pytest_addoption(parser): 8 parser.addoption( 9 "--package_name", action="append", default=[], 10 help="list of package_name to pass to test functions", 11 ) 12 13 14 collect_ignore = [ 15 'tests/manual_test.py', 16 'setuptools/tests/mod_with_constant.py', 17 'setuptools/_distutils', 18 '_distutils_hack', 19 'setuptools/extern', 20 'pkg_resources/extern', 21 'pkg_resources/tests/data', 22 'setuptools/_vendor', 23 'pkg_resources/_vendor', 24 ] 25 26 27 if sys.version_info < (3, 6): 28 collect_ignore.append('docs/conf.py') # uses f-strings 29 collect_ignore.append('pavement.py') 30 [end of conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conftest.py b/conftest.py --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,7 @@ import sys +import pytest + pytest_plugins = 'setuptools.tests.fixtures' @@ -9,6 +11,14 @@ "--package_name", action="append", default=[], help="list of package_name to pass to test functions", ) + parser.addoption( + "--integration", action="store_true", default=False, + help="run integration tests (only)" + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "integration: integration tests") collect_ignore = [ @@ -27,3 +37,13 @@ if sys.version_info < (3, 6): collect_ignore.append('docs/conf.py') # uses f-strings collect_ignore.append('pavement.py') + + [email protected](autouse=True) +def _skip_integration(request): + running_integration_tests = request.config.getoption("--integration") + is_integration_test = request.node.get_closest_marker("integration") + if running_integration_tests and not is_integration_test: + pytest.skip("running integration tests only") + if not running_integration_tests and is_integration_test: + pytest.skip("skipping integration tests")
{"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -1,5 +1,7 @@\n import sys\n \n+import pytest\n+\n \n pytest_plugins = 'setuptools.tests.fixtures'\n \n@@ -9,6 +11,14 @@\n \"--package_name\", action=\"append\", default=[],\n help=\"list of package_name to pass to test functions\",\n )\n+ parser.addoption(\n+ \"--integration\", action=\"store_true\", default=False,\n+ help=\"run integration tests (only)\"\n+ )\n+\n+\n+def pytest_configure(config):\n+ config.addinivalue_line(\"markers\", \"integration: integration tests\")\n \n \n collect_ignore = [\n@@ -27,3 +37,13 @@\n if sys.version_info < (3, 6):\n collect_ignore.append('docs/conf.py') # uses f-strings\n collect_ignore.append('pavement.py')\n+\n+\[email protected](autouse=True)\n+def _skip_integration(request):\n+ running_integration_tests = request.config.getoption(\"--integration\")\n+ is_integration_test = request.node.get_closest_marker(\"integration\")\n+ if running_integration_tests and not is_integration_test:\n+ pytest.skip(\"running integration tests only\")\n+ if not running_integration_tests and is_integration_test:\n+ pytest.skip(\"skipping integration tests\")\n", "issue": "[FR] Add integration tests to catch breaking changes in the API\n### What's the problem this feature will solve?\n\nIt would be nice to have integration tests focusing on the usage of setuptools \"public API\" by some popular packages in the community.\r\n\r\nThis way we can catch breaking changes in the API before publishing new releases\n\n### Describe the solution you'd like\n\nAccording to the discussion in https://github.com/pypa/setuptools/pull/2844, if adding a new \"integration test suite\", the following characteristics are desirable:\r\n\r\n1. It should run separated from the main test suite (integration tests are resource intensive and time consuming, so the best is to avoid always running them and postponing until a new release is ready).\r\n2. It should test how setuptools' API is being used by popular packages in the community to catch rare errors.\n\n### Alternative Solutions\n\n_No response_\n\n### Additional context\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "import sys\n\n\npytest_plugins = 'setuptools.tests.fixtures'\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--package_name\", action=\"append\", default=[],\n help=\"list of package_name to pass to test functions\",\n )\n\n\ncollect_ignore = [\n 'tests/manual_test.py',\n 'setuptools/tests/mod_with_constant.py',\n 'setuptools/_distutils',\n '_distutils_hack',\n 'setuptools/extern',\n 'pkg_resources/extern',\n 'pkg_resources/tests/data',\n 'setuptools/_vendor',\n 'pkg_resources/_vendor',\n]\n\n\nif sys.version_info < (3, 6):\n collect_ignore.append('docs/conf.py') # uses f-strings\n collect_ignore.append('pavement.py')\n", "path": "conftest.py"}]}
960
300
gh_patches_debug_26462
rasdani/github-patches
git_diff
litestar-org__litestar-2269
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 </issue> <code> [start of litestar/contrib/sqlalchemy/plugins/__init__.py] 1 from __future__ import annotations 2 3 from .init import ( 4 AsyncSessionConfig, 5 EngineConfig, 6 GenericSessionConfig, 7 GenericSQLAlchemyConfig, 8 SQLAlchemyAsyncConfig, 9 SQLAlchemyInitPlugin, 10 SQLAlchemySyncConfig, 11 SyncSessionConfig, 12 ) 13 from .serialization import SQLAlchemySerializationPlugin 14 15 16 class SQLAlchemyPlugin(SQLAlchemyInitPlugin, SQLAlchemySerializationPlugin): 17 """A plugin that provides SQLAlchemy integration.""" 18 19 def __init__(self, config: SQLAlchemyAsyncConfig | SQLAlchemySyncConfig) -> None: 20 SQLAlchemyInitPlugin.__init__(self, config=config) 21 SQLAlchemySerializationPlugin.__init__(self) 22 23 24 __all__ = ( 25 "AsyncSessionConfig", 26 "EngineConfig", 27 "GenericSQLAlchemyConfig", 28 "GenericSessionConfig", 29 "SQLAlchemyAsyncConfig", 30 "SQLAlchemyInitPlugin", 31 "SQLAlchemyPlugin", 32 "SQLAlchemySerializationPlugin", 33 "SQLAlchemySyncConfig", 34 "SyncSessionConfig", 35 ) 36 [end of litestar/contrib/sqlalchemy/plugins/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/litestar/contrib/sqlalchemy/plugins/__init__.py b/litestar/contrib/sqlalchemy/plugins/__init__.py --- a/litestar/contrib/sqlalchemy/plugins/__init__.py +++ b/litestar/contrib/sqlalchemy/plugins/__init__.py @@ -1,5 +1,10 @@ from __future__ import annotations +from typing import TYPE_CHECKING + +from litestar.contrib.sqlalchemy.plugins import _slots_base +from litestar.plugins import InitPluginProtocol + from .init import ( AsyncSessionConfig, EngineConfig, @@ -12,13 +17,29 @@ ) from .serialization import SQLAlchemySerializationPlugin +if TYPE_CHECKING: + from litestar.config.app import AppConfig + -class SQLAlchemyPlugin(SQLAlchemyInitPlugin, SQLAlchemySerializationPlugin): +class SQLAlchemyPlugin(InitPluginProtocol, _slots_base.SlotsBase): """A plugin that provides SQLAlchemy integration.""" def __init__(self, config: SQLAlchemyAsyncConfig | SQLAlchemySyncConfig) -> None: - SQLAlchemyInitPlugin.__init__(self, config=config) - SQLAlchemySerializationPlugin.__init__(self) + """Initialize ``SQLAlchemyPlugin``. + + Args: + config: configure DB connection and hook handlers and dependencies. + """ + self._config = config + + def on_app_init(self, app_config: AppConfig) -> AppConfig: + """Configure application for use with SQLAlchemy. + + Args: + app_config: The :class:`AppConfig <.config.app.AppConfig>` instance. + """ + app_config.plugins.extend([SQLAlchemyInitPlugin(config=self._config), SQLAlchemySerializationPlugin()]) + return app_config __all__ = (
{"golden_diff": "diff --git a/litestar/contrib/sqlalchemy/plugins/__init__.py b/litestar/contrib/sqlalchemy/plugins/__init__.py\n--- a/litestar/contrib/sqlalchemy/plugins/__init__.py\n+++ b/litestar/contrib/sqlalchemy/plugins/__init__.py\n@@ -1,5 +1,10 @@\n from __future__ import annotations\n \n+from typing import TYPE_CHECKING\n+\n+from litestar.contrib.sqlalchemy.plugins import _slots_base\n+from litestar.plugins import InitPluginProtocol\n+\n from .init import (\n AsyncSessionConfig,\n EngineConfig,\n@@ -12,13 +17,29 @@\n )\n from .serialization import SQLAlchemySerializationPlugin\n \n+if TYPE_CHECKING:\n+ from litestar.config.app import AppConfig\n+\n \n-class SQLAlchemyPlugin(SQLAlchemyInitPlugin, SQLAlchemySerializationPlugin):\n+class SQLAlchemyPlugin(InitPluginProtocol, _slots_base.SlotsBase):\n \"\"\"A plugin that provides SQLAlchemy integration.\"\"\"\n \n def __init__(self, config: SQLAlchemyAsyncConfig | SQLAlchemySyncConfig) -> None:\n- SQLAlchemyInitPlugin.__init__(self, config=config)\n- SQLAlchemySerializationPlugin.__init__(self)\n+ \"\"\"Initialize ``SQLAlchemyPlugin``.\n+\n+ Args:\n+ config: configure DB connection and hook handlers and dependencies.\n+ \"\"\"\n+ self._config = config\n+\n+ def on_app_init(self, app_config: AppConfig) -> AppConfig:\n+ \"\"\"Configure application for use with SQLAlchemy.\n+\n+ Args:\n+ app_config: The :class:`AppConfig <.config.app.AppConfig>` instance.\n+ \"\"\"\n+ app_config.plugins.extend([SQLAlchemyInitPlugin(config=self._config), SQLAlchemySerializationPlugin()])\n+ return app_config\n \n \n __all__ = (\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom .init import (\n AsyncSessionConfig,\n EngineConfig,\n GenericSessionConfig,\n GenericSQLAlchemyConfig,\n SQLAlchemyAsyncConfig,\n SQLAlchemyInitPlugin,\n SQLAlchemySyncConfig,\n SyncSessionConfig,\n)\nfrom .serialization import SQLAlchemySerializationPlugin\n\n\nclass SQLAlchemyPlugin(SQLAlchemyInitPlugin, SQLAlchemySerializationPlugin):\n \"\"\"A plugin that provides SQLAlchemy integration.\"\"\"\n\n def __init__(self, config: SQLAlchemyAsyncConfig | SQLAlchemySyncConfig) -> None:\n SQLAlchemyInitPlugin.__init__(self, config=config)\n SQLAlchemySerializationPlugin.__init__(self)\n\n\n__all__ = (\n \"AsyncSessionConfig\",\n \"EngineConfig\",\n \"GenericSQLAlchemyConfig\",\n \"GenericSessionConfig\",\n \"SQLAlchemyAsyncConfig\",\n \"SQLAlchemyInitPlugin\",\n \"SQLAlchemyPlugin\",\n \"SQLAlchemySerializationPlugin\",\n \"SQLAlchemySyncConfig\",\n \"SyncSessionConfig\",\n)\n", "path": "litestar/contrib/sqlalchemy/plugins/__init__.py"}]}
977
374
gh_patches_debug_3229
rasdani/github-patches
git_diff
CTFd__CTFd-2371
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test Translations & Support Spanish We need to test translations before release and make sure we support Spanish </issue> <code> [start of CTFd/constants/languages.py] 1 from CTFd.constants import RawEnum 2 3 4 class Languages(str, RawEnum): 5 ENGLISH = "en" 6 GERMAN = "de" 7 POLISH = "pl" 8 9 10 LANGUAGE_NAMES = { 11 "en": "English", 12 "de": "Deutsch", 13 "pl": "Polski", 14 } 15 16 SELECT_LANGUAGE_LIST = [("", "")] + [ 17 (str(lang), LANGUAGE_NAMES.get(str(lang))) for lang in Languages 18 ] 19 [end of CTFd/constants/languages.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/constants/languages.py b/CTFd/constants/languages.py --- a/CTFd/constants/languages.py +++ b/CTFd/constants/languages.py @@ -5,12 +5,16 @@ ENGLISH = "en" GERMAN = "de" POLISH = "pl" + SPANISH = "es" + CHINESE = "zh" LANGUAGE_NAMES = { "en": "English", "de": "Deutsch", "pl": "Polski", + "es": "Español", + "zh": "中文", } SELECT_LANGUAGE_LIST = [("", "")] + [
{"golden_diff": "diff --git a/CTFd/constants/languages.py b/CTFd/constants/languages.py\n--- a/CTFd/constants/languages.py\n+++ b/CTFd/constants/languages.py\n@@ -5,12 +5,16 @@\n ENGLISH = \"en\"\n GERMAN = \"de\"\n POLISH = \"pl\"\n+ SPANISH = \"es\"\n+ CHINESE = \"zh\"\n \n \n LANGUAGE_NAMES = {\n \"en\": \"English\",\n \"de\": \"Deutsch\",\n \"pl\": \"Polski\",\n+ \"es\": \"Espa\u00f1ol\",\n+ \"zh\": \"\u4e2d\u6587\",\n }\n \n SELECT_LANGUAGE_LIST = [(\"\", \"\")] + [\n", "issue": "Test Translations & Support Spanish\nWe need to test translations before release and make sure we support Spanish\n", "before_files": [{"content": "from CTFd.constants import RawEnum\n\n\nclass Languages(str, RawEnum):\n ENGLISH = \"en\"\n GERMAN = \"de\"\n POLISH = \"pl\"\n\n\nLANGUAGE_NAMES = {\n \"en\": \"English\",\n \"de\": \"Deutsch\",\n \"pl\": \"Polski\",\n}\n\nSELECT_LANGUAGE_LIST = [(\"\", \"\")] + [\n (str(lang), LANGUAGE_NAMES.get(str(lang))) for lang in Languages\n]\n", "path": "CTFd/constants/languages.py"}]}
688
151
gh_patches_debug_38746
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3627
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider teavana is broken During the global build at 2021-05-26-14-42-23, spider **teavana** failed with **0 features** and **2 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/teavana.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/teavana.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/teavana.geojson)) </issue> <code> [start of locations/spiders/teavana.py] 1 import scrapy 2 import re 3 from locations.items import GeojsonPointItem 4 5 6 class ExpressSpider(scrapy.Spider): 7 8 name = "teavana" 9 item_attributes = {"brand": "Teavana"} 10 allowed_domains = ["locations.teavana.com"] 11 download_delay = 0.5 12 start_urls = ("https://locations.teavana.com/",) 13 14 def parse_stores(self, response): 15 ref = re.findall(r"[^(\/)]+$", response.url) 16 if len(ref) > 0: 17 ref = ref[0].split(".")[0] 18 properties = { 19 "addr_full": " ".join( 20 response.xpath( 21 '//span[@itemprop="streetAddress"]/span/text()' 22 ).extract() 23 ), 24 "phone": response.xpath( 25 'normalize-space(//span[@itemprop="telephone"]/text())' 26 ).extract_first(), 27 "city": response.xpath( 28 'normalize-space(//span[@itemprop="addressLocality"]/text())' 29 ).extract_first(), 30 "state": response.xpath( 31 'normalize-space(//abbr[@itemprop="addressRegion"]/text())' 32 ).extract_first(), 33 "postcode": response.xpath( 34 'normalize-space(//span[@itemprop="postalCode"]/text())' 35 ).extract_first(), 36 "ref": ref, 37 "website": response.url, 38 "lat": float( 39 response.xpath( 40 'normalize-space(//meta[@itemprop="latitude"]/@content)' 41 ).extract_first() 42 ), 43 "lon": float( 44 response.xpath( 45 'normalize-space(//meta[@itemprop="longitude"]/@content)' 46 ).extract_first() 47 ), 48 } 49 hours = response.xpath('//div[@itemprop="openingHours"]/@content').extract() 50 if hours != []: 51 hours = "; ".join(hours) 52 properties["opening_hours"] = hours 53 yield GeojsonPointItem(**properties) 54 55 def parse_city_stores(self, response): 56 stores = response.xpath( 57 '//h3[@class="Teaser-title Link Link--teaser Heading--h5"]/a/@href' 58 ).extract() 59 for store in stores: 60 yield scrapy.Request(response.urljoin(store), callback=self.parse_stores) 61 62 def parse_state(self, response): 63 urls = response.xpath( 64 '//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href' 65 ).extract() 66 for path in urls: 67 pattern = re.compile(r"..\/[a-z]{2}\/[a-z]{2}\/[^()]+\/[^()]+.html$") 68 if pattern.match(path.strip()): 69 yield scrapy.Request(response.urljoin(path), callback=self.parse_stores) 70 else: 71 yield scrapy.Request( 72 response.urljoin(path), callback=self.parse_city_stores 73 ) 74 75 def parse(self, response): 76 urls = response.xpath( 77 '//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href' 78 ).extract() 79 for path in urls: 80 pattern = re.compile(r"^[a-z]{2}\/[a-z]{2}.html$") 81 pattern1 = re.compile(r"^[a-z]{2}\/[a-z]{2}\/[^()]+\/[^()]+.html$") 82 if pattern.match(path.strip()): 83 yield scrapy.Request(response.urljoin(path), callback=self.parse_state) 84 elif pattern1.match(path.strip()): 85 yield scrapy.Request(response.urljoin(path), callback=self.parse_stores) 86 else: 87 yield scrapy.Request( 88 response.urljoin(path), callback=self.parse_city_stores 89 ) 90 [end of locations/spiders/teavana.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/teavana.py b/locations/spiders/teavana.py deleted file mode 100644 --- a/locations/spiders/teavana.py +++ /dev/null @@ -1,89 +0,0 @@ -import scrapy -import re -from locations.items import GeojsonPointItem - - -class ExpressSpider(scrapy.Spider): - - name = "teavana" - item_attributes = {"brand": "Teavana"} - allowed_domains = ["locations.teavana.com"] - download_delay = 0.5 - start_urls = ("https://locations.teavana.com/",) - - def parse_stores(self, response): - ref = re.findall(r"[^(\/)]+$", response.url) - if len(ref) > 0: - ref = ref[0].split(".")[0] - properties = { - "addr_full": " ".join( - response.xpath( - '//span[@itemprop="streetAddress"]/span/text()' - ).extract() - ), - "phone": response.xpath( - 'normalize-space(//span[@itemprop="telephone"]/text())' - ).extract_first(), - "city": response.xpath( - 'normalize-space(//span[@itemprop="addressLocality"]/text())' - ).extract_first(), - "state": response.xpath( - 'normalize-space(//abbr[@itemprop="addressRegion"]/text())' - ).extract_first(), - "postcode": response.xpath( - 'normalize-space(//span[@itemprop="postalCode"]/text())' - ).extract_first(), - "ref": ref, - "website": response.url, - "lat": float( - response.xpath( - 'normalize-space(//meta[@itemprop="latitude"]/@content)' - ).extract_first() - ), - "lon": float( - response.xpath( - 'normalize-space(//meta[@itemprop="longitude"]/@content)' - ).extract_first() - ), - } - hours = response.xpath('//div[@itemprop="openingHours"]/@content').extract() - if hours != []: - hours = "; ".join(hours) - properties["opening_hours"] = hours - yield GeojsonPointItem(**properties) - - def parse_city_stores(self, response): - stores = response.xpath( - '//h3[@class="Teaser-title Link Link--teaser Heading--h5"]/a/@href' - ).extract() - for store in stores: - yield scrapy.Request(response.urljoin(store), callback=self.parse_stores) - - def parse_state(self, response): - urls = response.xpath( - '//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href' - ).extract() - for path in urls: - pattern = re.compile(r"..\/[a-z]{2}\/[a-z]{2}\/[^()]+\/[^()]+.html$") - if pattern.match(path.strip()): - yield scrapy.Request(response.urljoin(path), callback=self.parse_stores) - else: - yield scrapy.Request( - response.urljoin(path), callback=self.parse_city_stores - ) - - def parse(self, response): - urls = response.xpath( - '//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href' - ).extract() - for path in urls: - pattern = re.compile(r"^[a-z]{2}\/[a-z]{2}.html$") - pattern1 = re.compile(r"^[a-z]{2}\/[a-z]{2}\/[^()]+\/[^()]+.html$") - if pattern.match(path.strip()): - yield scrapy.Request(response.urljoin(path), callback=self.parse_state) - elif pattern1.match(path.strip()): - yield scrapy.Request(response.urljoin(path), callback=self.parse_stores) - else: - yield scrapy.Request( - response.urljoin(path), callback=self.parse_city_stores - )
{"golden_diff": "diff --git a/locations/spiders/teavana.py b/locations/spiders/teavana.py\ndeleted file mode 100644\n--- a/locations/spiders/teavana.py\n+++ /dev/null\n@@ -1,89 +0,0 @@\n-import scrapy\n-import re\n-from locations.items import GeojsonPointItem\n-\n-\n-class ExpressSpider(scrapy.Spider):\n-\n- name = \"teavana\"\n- item_attributes = {\"brand\": \"Teavana\"}\n- allowed_domains = [\"locations.teavana.com\"]\n- download_delay = 0.5\n- start_urls = (\"https://locations.teavana.com/\",)\n-\n- def parse_stores(self, response):\n- ref = re.findall(r\"[^(\\/)]+$\", response.url)\n- if len(ref) > 0:\n- ref = ref[0].split(\".\")[0]\n- properties = {\n- \"addr_full\": \" \".join(\n- response.xpath(\n- '//span[@itemprop=\"streetAddress\"]/span/text()'\n- ).extract()\n- ),\n- \"phone\": response.xpath(\n- 'normalize-space(//span[@itemprop=\"telephone\"]/text())'\n- ).extract_first(),\n- \"city\": response.xpath(\n- 'normalize-space(//span[@itemprop=\"addressLocality\"]/text())'\n- ).extract_first(),\n- \"state\": response.xpath(\n- 'normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())'\n- ).extract_first(),\n- \"postcode\": response.xpath(\n- 'normalize-space(//span[@itemprop=\"postalCode\"]/text())'\n- ).extract_first(),\n- \"ref\": ref,\n- \"website\": response.url,\n- \"lat\": float(\n- response.xpath(\n- 'normalize-space(//meta[@itemprop=\"latitude\"]/@content)'\n- ).extract_first()\n- ),\n- \"lon\": float(\n- response.xpath(\n- 'normalize-space(//meta[@itemprop=\"longitude\"]/@content)'\n- ).extract_first()\n- ),\n- }\n- hours = response.xpath('//div[@itemprop=\"openingHours\"]/@content').extract()\n- if hours != []:\n- hours = \"; \".join(hours)\n- properties[\"opening_hours\"] = hours\n- yield GeojsonPointItem(**properties)\n-\n- def parse_city_stores(self, response):\n- stores = response.xpath(\n- '//h3[@class=\"Teaser-title Link Link--teaser Heading--h5\"]/a/@href'\n- ).extract()\n- for store in stores:\n- yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)\n-\n- def parse_state(self, response):\n- urls = response.xpath(\n- '//div[@class=\"c-directory-list-content-wrapper\"]/ul/li/a/@href'\n- ).extract()\n- for path in urls:\n- pattern = re.compile(r\"..\\/[a-z]{2}\\/[a-z]{2}\\/[^()]+\\/[^()]+.html$\")\n- if pattern.match(path.strip()):\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n- else:\n- yield scrapy.Request(\n- response.urljoin(path), callback=self.parse_city_stores\n- )\n-\n- def parse(self, response):\n- urls = response.xpath(\n- '//div[@class=\"c-directory-list-content-wrapper\"]/ul/li/a/@href'\n- ).extract()\n- for path in urls:\n- pattern = re.compile(r\"^[a-z]{2}\\/[a-z]{2}.html$\")\n- pattern1 = re.compile(r\"^[a-z]{2}\\/[a-z]{2}\\/[^()]+\\/[^()]+.html$\")\n- if pattern.match(path.strip()):\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_state)\n- elif pattern1.match(path.strip()):\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n- else:\n- yield scrapy.Request(\n- response.urljoin(path), callback=self.parse_city_stores\n- )\n", "issue": "Spider teavana is broken\nDuring the global build at 2021-05-26-14-42-23, spider **teavana** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/teavana.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/teavana.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/teavana.geojson))\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\n\n\nclass ExpressSpider(scrapy.Spider):\n\n name = \"teavana\"\n item_attributes = {\"brand\": \"Teavana\"}\n allowed_domains = [\"locations.teavana.com\"]\n download_delay = 0.5\n start_urls = (\"https://locations.teavana.com/\",)\n\n def parse_stores(self, response):\n ref = re.findall(r\"[^(\\/)]+$\", response.url)\n if len(ref) > 0:\n ref = ref[0].split(\".\")[0]\n properties = {\n \"addr_full\": \" \".join(\n response.xpath(\n '//span[@itemprop=\"streetAddress\"]/span/text()'\n ).extract()\n ),\n \"phone\": response.xpath(\n 'normalize-space(//span[@itemprop=\"telephone\"]/text())'\n ).extract_first(),\n \"city\": response.xpath(\n 'normalize-space(//span[@itemprop=\"addressLocality\"]/text())'\n ).extract_first(),\n \"state\": response.xpath(\n 'normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())'\n ).extract_first(),\n \"postcode\": response.xpath(\n 'normalize-space(//span[@itemprop=\"postalCode\"]/text())'\n ).extract_first(),\n \"ref\": ref,\n \"website\": response.url,\n \"lat\": float(\n response.xpath(\n 'normalize-space(//meta[@itemprop=\"latitude\"]/@content)'\n ).extract_first()\n ),\n \"lon\": float(\n response.xpath(\n 'normalize-space(//meta[@itemprop=\"longitude\"]/@content)'\n ).extract_first()\n ),\n }\n hours = response.xpath('//div[@itemprop=\"openingHours\"]/@content').extract()\n if hours != []:\n hours = \"; \".join(hours)\n properties[\"opening_hours\"] = hours\n yield GeojsonPointItem(**properties)\n\n def parse_city_stores(self, response):\n stores = response.xpath(\n '//h3[@class=\"Teaser-title Link Link--teaser Heading--h5\"]/a/@href'\n ).extract()\n for store in stores:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)\n\n def parse_state(self, response):\n urls = response.xpath(\n '//div[@class=\"c-directory-list-content-wrapper\"]/ul/li/a/@href'\n ).extract()\n for path in urls:\n pattern = re.compile(r\"..\\/[a-z]{2}\\/[a-z]{2}\\/[^()]+\\/[^()]+.html$\")\n if pattern.match(path.strip()):\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n else:\n yield scrapy.Request(\n response.urljoin(path), callback=self.parse_city_stores\n )\n\n def parse(self, response):\n urls = response.xpath(\n '//div[@class=\"c-directory-list-content-wrapper\"]/ul/li/a/@href'\n ).extract()\n for path in urls:\n pattern = re.compile(r\"^[a-z]{2}\\/[a-z]{2}.html$\")\n pattern1 = re.compile(r\"^[a-z]{2}\\/[a-z]{2}\\/[^()]+\\/[^()]+.html$\")\n if pattern.match(path.strip()):\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state)\n elif pattern1.match(path.strip()):\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n else:\n yield scrapy.Request(\n response.urljoin(path), callback=self.parse_city_stores\n )\n", "path": "locations/spiders/teavana.py"}]}
1,649
894
gh_patches_debug_2128
rasdani/github-patches
git_diff
projectmesa__mesa-891
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cookiecutter doesn't work on 0.8.7 release **Describe the bug** `mesa startproject` fails after `pipenv install mesa` ``` A valid repository for "/home/neil/.local/share/virtualenvs/baseline-economy-6fg_iky1/lib/python3.8/site-packages/mesa/cookiecutter-mesa" could not be found in the following locations: ... ``` **Expected behavior** Generate the project layout **To Reproduce** - pipenv install mesa - mesa startproject **Additional context** The cookiecutter directory from the repo is missing from the installation. Additionally there is no help message for `startproject` when you run `mesa --help` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 import re 4 5 from setuptools import setup, find_packages 6 from codecs import open 7 8 requires = ["click", "cookiecutter", "networkx", "numpy", "pandas", "tornado", "tqdm"] 9 10 extras_require = { 11 "dev": ["coverage", "flake8", "pytest >= 3.6", "pytest-cov", "sphinx"], 12 "docs": ["sphinx"], 13 } 14 15 version = "" 16 with open("mesa/__init__.py", "r") as fd: 17 version = re.search( 18 r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE 19 ).group(1) 20 21 with open("README.rst", "rb", encoding="utf-8") as f: 22 readme = f.read() 23 24 setup( 25 name="Mesa", 26 version=version, 27 description="Agent-based modeling (ABM) in Python 3+", 28 long_description=readme, 29 author="Project Mesa Team", 30 author_email="[email protected]", 31 url="https://github.com/projectmesa/mesa", 32 packages=find_packages(), 33 package_data={ 34 "mesa": [ 35 "visualization/templates/*.html", 36 "visualization/templates/css/*", 37 "visualization/templates/fonts/*", 38 "visualization/templates/js/*", 39 ], 40 "cookiecutter-mesa": ["cookiecutter-mesa/*"], 41 }, 42 include_package_data=True, 43 install_requires=requires, 44 extras_require=extras_require, 45 keywords="agent based modeling model ABM simulation multi-agent", 46 license="Apache 2.0", 47 zip_safe=False, 48 classifiers=[ 49 "Topic :: Scientific/Engineering", 50 "Topic :: Scientific/Engineering :: Artificial Life", 51 "Topic :: Scientific/Engineering :: Artificial Intelligence", 52 "Intended Audience :: Science/Research", 53 "Programming Language :: Python :: 3 :: Only", 54 "License :: OSI Approved :: Apache Software License", 55 "Operating System :: OS Independent", 56 "Development Status :: 3 - Alpha", 57 "Natural Language :: English", 58 ], 59 entry_points=""" 60 [console_scripts] 61 mesa=mesa.main:cli 62 """, 63 ) 64 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ requires = ["click", "cookiecutter", "networkx", "numpy", "pandas", "tornado", "tqdm"] extras_require = { - "dev": ["coverage", "flake8", "pytest >= 3.6", "pytest-cov", "sphinx"], + "dev": ["coverage", "flake8", "pytest >= 4.6", "pytest-cov", "sphinx"], "docs": ["sphinx"], }
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,7 +8,7 @@\n requires = [\"click\", \"cookiecutter\", \"networkx\", \"numpy\", \"pandas\", \"tornado\", \"tqdm\"]\n \n extras_require = {\n- \"dev\": [\"coverage\", \"flake8\", \"pytest >= 3.6\", \"pytest-cov\", \"sphinx\"],\n+ \"dev\": [\"coverage\", \"flake8\", \"pytest >= 4.6\", \"pytest-cov\", \"sphinx\"],\n \"docs\": [\"sphinx\"],\n }\n", "issue": "Cookiecutter doesn't work on 0.8.7 release\n**Describe the bug**\r\n`mesa startproject` fails after `pipenv install mesa`\r\n```\r\nA valid repository for \"/home/neil/.local/share/virtualenvs/baseline-economy-6fg_iky1/lib/python3.8/site-packages/mesa/cookiecutter-mesa\" could not be found in the following locations:\r\n...\r\n```\r\n\r\n**Expected behavior**\r\nGenerate the project layout\r\n\r\n**To Reproduce**\r\n- pipenv install mesa\r\n- mesa startproject\r\n\r\n**Additional context**\r\nThe cookiecutter directory from the repo is missing from the installation.\r\nAdditionally there is no help message for `startproject` when you run `mesa --help`\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\n\nrequires = [\"click\", \"cookiecutter\", \"networkx\", \"numpy\", \"pandas\", \"tornado\", \"tqdm\"]\n\nextras_require = {\n \"dev\": [\"coverage\", \"flake8\", \"pytest >= 3.6\", \"pytest-cov\", \"sphinx\"],\n \"docs\": [\"sphinx\"],\n}\n\nversion = \"\"\nwith open(\"mesa/__init__.py\", \"r\") as fd:\n version = re.search(\n r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE\n ).group(1)\n\nwith open(\"README.rst\", \"rb\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"Mesa\",\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author=\"Project Mesa Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/projectmesa/mesa\",\n packages=find_packages(),\n package_data={\n \"mesa\": [\n \"visualization/templates/*.html\",\n \"visualization/templates/css/*\",\n \"visualization/templates/fonts/*\",\n \"visualization/templates/js/*\",\n ],\n \"cookiecutter-mesa\": [\"cookiecutter-mesa/*\"],\n },\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords=\"agent based modeling model ABM simulation multi-agent\",\n license=\"Apache 2.0\",\n zip_safe=False,\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Life\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n mesa=mesa.main:cli\n \"\"\",\n)\n", "path": "setup.py"}]}
1,290
133
gh_patches_debug_1092
rasdani/github-patches
git_diff
psychopy__psychopy-2333
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Demos -> Hardware -> testSoundLatency.py not working in v3.0.6 Running Demo -> Hardware -> testSoundLatency.py results in the following error message: ``` ##### Running: C:\Program Files (x86)\PsychoPy3\lib\site-packages\psychopy\demos\coder\hardware\testSoundLatency.py ##### pygame 1.9.4 Hello from the pygame community. https://www.pygame.org/contribute.html Traceback (most recent call last): File "C:\Program Files (x86)\PsychoPy3\lib\site-packages\psychopy\demos\coder\hardware\testSoundLatency.py", line 16, in <module> from labjack import u3 ModuleNotFoundError: No module named 'labjack' ``` Windows 7, 64 bit, PsychoPy 3.0.6 64 bit standalone </issue> <code> [start of psychopy/demos/coder/hardware/labjack_u3.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 Demo for using labjack DAC devices 6 7 See also 8 http: //labjack.com/support/labjackpython 9 but note that the version shipped with standalone PsychoPy 10 has u3 (and others below an umbrella called labjack) so the import 11 line is slightly different to the documentation on LabJack's website 12 """ 13 14 from __future__ import absolute_import, division, print_function 15 16 from builtins import range 17 from psychopy import visual, core, event, sound 18 from labjack import u3 19 20 # sound.setAudioAPI('pyaudio') 21 22 win = visual.Window([800, 800]) 23 stim = visual.GratingStim(win, color=-1, sf=0) 24 snd = sound.Sound(880) 25 print(snd) 26 # setup labjack U3 27 ports = u3.U3() 28 FIO4 = 6004 # the address of line FIO4 29 30 while True: 31 # do this repeatedly for timing tests 32 ports.writeRegister(FIO4, 0) # start low 33 34 # draw black square 35 stim.draw() 36 win.flip() 37 38 # wait for a key press 39 if 'q' in event.waitKeys(): 40 break 41 42 # set to white, flip window and raise level port FIO4 43 stim.setColor(1) 44 stim.draw() 45 win.flip() 46 ports.writeRegister(FIO4, 1) 47 snd.play() 48 for frameN in range(4): 49 stim.draw() 50 win.flip() 51 52 # set color back to black and set FIO4 to low again 53 stim.setColor(-1) 54 stim.draw() 55 win.flip() 56 ports.writeRegister(FIO4, 0) 57 58 win.close() 59 core.quit() 60 61 # The contents of this file are in the public domain. 62 [end of psychopy/demos/coder/hardware/labjack_u3.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/psychopy/demos/coder/hardware/labjack_u3.py b/psychopy/demos/coder/hardware/labjack_u3.py --- a/psychopy/demos/coder/hardware/labjack_u3.py +++ b/psychopy/demos/coder/hardware/labjack_u3.py @@ -15,7 +15,10 @@ from builtins import range from psychopy import visual, core, event, sound -from labjack import u3 +try: + from labjack import u3 +except ImportError: + import u3 # sound.setAudioAPI('pyaudio')
{"golden_diff": "diff --git a/psychopy/demos/coder/hardware/labjack_u3.py b/psychopy/demos/coder/hardware/labjack_u3.py\n--- a/psychopy/demos/coder/hardware/labjack_u3.py\n+++ b/psychopy/demos/coder/hardware/labjack_u3.py\n@@ -15,7 +15,10 @@\n \n from builtins import range\n from psychopy import visual, core, event, sound\n-from labjack import u3\n+try:\n+ from labjack import u3\n+except ImportError:\n+ import u3\n \n # sound.setAudioAPI('pyaudio')\n", "issue": "Demos -> Hardware -> testSoundLatency.py not working in v3.0.6\nRunning Demo -> Hardware -> testSoundLatency.py results in the following error message:\r\n```\r\n##### Running: C:\\Program Files (x86)\\PsychoPy3\\lib\\site-packages\\psychopy\\demos\\coder\\hardware\\testSoundLatency.py #####\r\npygame 1.9.4\r\nHello from the pygame community. https://www.pygame.org/contribute.html\r\nTraceback (most recent call last):\r\n File \"C:\\Program Files (x86)\\PsychoPy3\\lib\\site-packages\\psychopy\\demos\\coder\\hardware\\testSoundLatency.py\", line 16, in <module>\r\n from labjack import u3\r\nModuleNotFoundError: No module named 'labjack'\r\n```\r\nWindows 7, 64 bit, PsychoPy 3.0.6 64 bit standalone\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDemo for using labjack DAC devices\n\nSee also\n http: //labjack.com/support/labjackpython\nbut note that the version shipped with standalone PsychoPy\nhas u3 (and others below an umbrella called labjack) so the import\nline is slightly different to the documentation on LabJack's website\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom builtins import range\nfrom psychopy import visual, core, event, sound\nfrom labjack import u3\n\n# sound.setAudioAPI('pyaudio')\n\nwin = visual.Window([800, 800])\nstim = visual.GratingStim(win, color=-1, sf=0)\nsnd = sound.Sound(880)\nprint(snd)\n# setup labjack U3\nports = u3.U3()\nFIO4 = 6004 # the address of line FIO4\n\nwhile True:\n # do this repeatedly for timing tests\n ports.writeRegister(FIO4, 0) # start low\n\n # draw black square\n stim.draw()\n win.flip()\n\n # wait for a key press\n if 'q' in event.waitKeys():\n break\n\n # set to white, flip window and raise level port FIO4\n stim.setColor(1)\n stim.draw()\n win.flip()\n ports.writeRegister(FIO4, 1)\n snd.play()\n for frameN in range(4):\n stim.draw()\n win.flip()\n\n # set color back to black and set FIO4 to low again\n stim.setColor(-1)\n stim.draw()\n win.flip()\n ports.writeRegister(FIO4, 0)\n\nwin.close()\ncore.quit()\n\n# The contents of this file are in the public domain.\n", "path": "psychopy/demos/coder/hardware/labjack_u3.py"}]}
1,267
139
gh_patches_debug_7880
rasdani/github-patches
git_diff
locustio__locust-841
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Don't rely on obsolete msgpack-python msgpack-python looks obsolete -> https://pypi.org/project/msgpack-python/ "This package is deprecated. Install msgpack instead." but msgpack doesn't provide pythonegg(msgpack-python). Please consider switching to msgpack directly instead. </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 import ast 3 import os 4 import re 5 6 from setuptools import find_packages, setup 7 8 # parse version from locust/__init__.py 9 _version_re = re.compile(r'__version__\s+=\s+(.*)') 10 _init_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), "locust", "__init__.py") 11 with open(_init_file, 'rb') as f: 12 version = str(ast.literal_eval(_version_re.search( 13 f.read().decode('utf-8')).group(1))) 14 15 setup( 16 name='locustio', 17 version=version, 18 description="Website load testing framework", 19 long_description="""Locust is a python utility for doing easy, distributed load testing of a web site""", 20 classifiers=[ 21 "Topic :: Software Development :: Testing :: Traffic Generation", 22 "Development Status :: 4 - Beta", 23 "License :: OSI Approved :: MIT License", 24 "Operating System :: OS Independent", 25 "Programming Language :: Python", 26 "Programming Language :: Python :: 2", 27 "Programming Language :: Python :: 2.7", 28 "Programming Language :: Python :: 3", 29 "Programming Language :: Python :: 3.4", 30 "Programming Language :: Python :: 3.5", 31 "Programming Language :: Python :: 3.6", 32 "Intended Audience :: Developers", 33 "Intended Audience :: System Administrators", 34 ], 35 keywords='', 36 author='Jonatan Heyman, Carl Bystrom, Joakim Hamrén, Hugo Heyman', 37 author_email='', 38 url='https://locust.io/', 39 license='MIT', 40 packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), 41 include_package_data=True, 42 zip_safe=False, 43 install_requires=["gevent>=1.2.2", "flask>=0.10.1", "requests>=2.9.1", "msgpack-python>=0.4.2", "six>=1.10.0", "pyzmq>=16.0.2"], 44 test_suite="locust.test", 45 tests_require=['mock'], 46 entry_points={ 47 'console_scripts': [ 48 'locust = locust.main:main', 49 ] 50 }, 51 ) 52 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), include_package_data=True, zip_safe=False, - install_requires=["gevent>=1.2.2", "flask>=0.10.1", "requests>=2.9.1", "msgpack-python>=0.4.2", "six>=1.10.0", "pyzmq>=16.0.2"], + install_requires=["gevent>=1.2.2", "flask>=0.10.1", "requests>=2.9.1", "msgpack>=0.4.2", "six>=1.10.0", "pyzmq>=16.0.2"], test_suite="locust.test", tests_require=['mock'], entry_points={
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,7 @@\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n- install_requires=[\"gevent>=1.2.2\", \"flask>=0.10.1\", \"requests>=2.9.1\", \"msgpack-python>=0.4.2\", \"six>=1.10.0\", \"pyzmq>=16.0.2\"],\n+ install_requires=[\"gevent>=1.2.2\", \"flask>=0.10.1\", \"requests>=2.9.1\", \"msgpack>=0.4.2\", \"six>=1.10.0\", \"pyzmq>=16.0.2\"],\n test_suite=\"locust.test\",\n tests_require=['mock'],\n entry_points={\n", "issue": "Don't rely on obsolete msgpack-python\n\r\nmsgpack-python looks obsolete -> https://pypi.org/project/msgpack-python/\r\n\"This package is deprecated. Install msgpack instead.\"\r\n\r\nbut msgpack doesn't provide pythonegg(msgpack-python).\r\n\r\nPlease consider switching to msgpack directly instead.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\n\nfrom setuptools import find_packages, setup\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locustio',\n version=version,\n description=\"Website load testing framework\",\n long_description=\"\"\"Locust is a python utility for doing easy, distributed load testing of a web site\"\"\",\n classifiers=[\n \"Topic :: Software Development :: Testing :: Traffic Generation\",\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n ],\n keywords='',\n author='Jonatan Heyman, Carl Bystrom, Joakim Hamr\u00e9n, Hugo Heyman',\n author_email='',\n url='https://locust.io/',\n license='MIT',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\"gevent>=1.2.2\", \"flask>=0.10.1\", \"requests>=2.9.1\", \"msgpack-python>=0.4.2\", \"six>=1.10.0\", \"pyzmq>=16.0.2\"],\n test_suite=\"locust.test\",\n tests_require=['mock'],\n entry_points={\n 'console_scripts': [\n 'locust = locust.main:main',\n ]\n },\n)\n", "path": "setup.py"}]}
1,168
211
gh_patches_debug_28502
rasdani/github-patches
git_diff
CTFd__CTFd-1560
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Colon in CTF name breaks emails This is because of: https://tools.ietf.org/html/rfc5322#section-2.2 This can probably be fixed with `"HE:tech" <[email protected]>`. </issue> <code> [start of CTFd/utils/email/smtp.py] 1 import smtplib 2 from email.message import EmailMessage 3 from socket import timeout 4 5 from CTFd.utils import get_app_config, get_config 6 7 8 def get_smtp(host, port, username=None, password=None, TLS=None, SSL=None, auth=None): 9 if SSL is None: 10 smtp = smtplib.SMTP(host, port, timeout=3) 11 else: 12 smtp = smtplib.SMTP_SSL(host, port, timeout=3) 13 14 if TLS: 15 smtp.starttls() 16 17 if auth: 18 smtp.login(username, password) 19 return smtp 20 21 22 def sendmail(addr, text, subject): 23 ctf_name = get_config("ctf_name") 24 mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR") 25 mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr) 26 27 data = { 28 "host": get_config("mail_server") or get_app_config("MAIL_SERVER"), 29 "port": int(get_config("mail_port") or get_app_config("MAIL_PORT")), 30 } 31 username = get_config("mail_username") or get_app_config("MAIL_USERNAME") 32 password = get_config("mail_password") or get_app_config("MAIL_PASSWORD") 33 TLS = get_config("mail_tls") or get_app_config("MAIL_TLS") 34 SSL = get_config("mail_ssl") or get_app_config("MAIL_SSL") 35 auth = get_config("mail_useauth") or get_app_config("MAIL_USEAUTH") 36 37 if username: 38 data["username"] = username 39 if password: 40 data["password"] = password 41 if TLS: 42 data["TLS"] = TLS 43 if SSL: 44 data["SSL"] = SSL 45 if auth: 46 data["auth"] = auth 47 48 try: 49 smtp = get_smtp(**data) 50 51 msg = EmailMessage() 52 msg.set_content(text) 53 54 msg["Subject"] = subject 55 msg["From"] = mailfrom_addr 56 msg["To"] = addr 57 58 smtp.send_message(msg) 59 60 smtp.quit() 61 return True, "Email sent" 62 except smtplib.SMTPException as e: 63 return False, str(e) 64 except timeout: 65 return False, "SMTP server connection timed out" 66 except Exception as e: 67 return False, str(e) 68 [end of CTFd/utils/email/smtp.py] [start of CTFd/utils/email/mailgun.py] 1 import requests 2 3 from CTFd.utils import get_app_config, get_config 4 5 6 def sendmail(addr, text, subject): 7 ctf_name = get_config("ctf_name") 8 mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR") 9 mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr) 10 11 mailgun_base_url = get_config("mailgun_base_url") or get_app_config( 12 "MAILGUN_BASE_URL" 13 ) 14 mailgun_api_key = get_config("mailgun_api_key") or get_app_config("MAILGUN_API_KEY") 15 try: 16 r = requests.post( 17 mailgun_base_url + "/messages", 18 auth=("api", mailgun_api_key), 19 data={ 20 "from": mailfrom_addr, 21 "to": [addr], 22 "subject": subject, 23 "text": text, 24 }, 25 timeout=1.0, 26 ) 27 except requests.RequestException as e: 28 return ( 29 False, 30 "{error} exception occured while handling your request".format( 31 error=type(e).__name__ 32 ), 33 ) 34 35 if r.status_code == 200: 36 return True, "Email sent" 37 else: 38 return False, "Mailgun settings are incorrect" 39 [end of CTFd/utils/email/mailgun.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/utils/email/mailgun.py b/CTFd/utils/email/mailgun.py --- a/CTFd/utils/email/mailgun.py +++ b/CTFd/utils/email/mailgun.py @@ -1,3 +1,5 @@ +from email.utils import formataddr + import requests from CTFd.utils import get_app_config, get_config @@ -6,7 +8,7 @@ def sendmail(addr, text, subject): ctf_name = get_config("ctf_name") mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR") - mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr) + mailfrom_addr = formataddr((ctf_name, mailfrom_addr)) mailgun_base_url = get_config("mailgun_base_url") or get_app_config( "MAILGUN_BASE_URL" diff --git a/CTFd/utils/email/smtp.py b/CTFd/utils/email/smtp.py --- a/CTFd/utils/email/smtp.py +++ b/CTFd/utils/email/smtp.py @@ -1,5 +1,6 @@ import smtplib from email.message import EmailMessage +from email.utils import formataddr from socket import timeout from CTFd.utils import get_app_config, get_config @@ -22,7 +23,7 @@ def sendmail(addr, text, subject): ctf_name = get_config("ctf_name") mailfrom_addr = get_config("mailfrom_addr") or get_app_config("MAILFROM_ADDR") - mailfrom_addr = "{} <{}>".format(ctf_name, mailfrom_addr) + mailfrom_addr = formataddr((ctf_name, mailfrom_addr)) data = { "host": get_config("mail_server") or get_app_config("MAIL_SERVER"),
{"golden_diff": "diff --git a/CTFd/utils/email/mailgun.py b/CTFd/utils/email/mailgun.py\n--- a/CTFd/utils/email/mailgun.py\n+++ b/CTFd/utils/email/mailgun.py\n@@ -1,3 +1,5 @@\n+from email.utils import formataddr\n+\n import requests\n \n from CTFd.utils import get_app_config, get_config\n@@ -6,7 +8,7 @@\n def sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n- mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n+ mailfrom_addr = formataddr((ctf_name, mailfrom_addr))\n \n mailgun_base_url = get_config(\"mailgun_base_url\") or get_app_config(\n \"MAILGUN_BASE_URL\"\ndiff --git a/CTFd/utils/email/smtp.py b/CTFd/utils/email/smtp.py\n--- a/CTFd/utils/email/smtp.py\n+++ b/CTFd/utils/email/smtp.py\n@@ -1,5 +1,6 @@\n import smtplib\n from email.message import EmailMessage\n+from email.utils import formataddr\n from socket import timeout\n \n from CTFd.utils import get_app_config, get_config\n@@ -22,7 +23,7 @@\n def sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n- mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n+ mailfrom_addr = formataddr((ctf_name, mailfrom_addr))\n \n data = {\n \"host\": get_config(\"mail_server\") or get_app_config(\"MAIL_SERVER\"),\n", "issue": "Colon in CTF name breaks emails\nThis is because of:\r\n\r\nhttps://tools.ietf.org/html/rfc5322#section-2.2\r\n\r\nThis can probably be fixed with `\"HE:tech\" <[email protected]>`.\n", "before_files": [{"content": "import smtplib\nfrom email.message import EmailMessage\nfrom socket import timeout\n\nfrom CTFd.utils import get_app_config, get_config\n\n\ndef get_smtp(host, port, username=None, password=None, TLS=None, SSL=None, auth=None):\n if SSL is None:\n smtp = smtplib.SMTP(host, port, timeout=3)\n else:\n smtp = smtplib.SMTP_SSL(host, port, timeout=3)\n\n if TLS:\n smtp.starttls()\n\n if auth:\n smtp.login(username, password)\n return smtp\n\n\ndef sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n\n data = {\n \"host\": get_config(\"mail_server\") or get_app_config(\"MAIL_SERVER\"),\n \"port\": int(get_config(\"mail_port\") or get_app_config(\"MAIL_PORT\")),\n }\n username = get_config(\"mail_username\") or get_app_config(\"MAIL_USERNAME\")\n password = get_config(\"mail_password\") or get_app_config(\"MAIL_PASSWORD\")\n TLS = get_config(\"mail_tls\") or get_app_config(\"MAIL_TLS\")\n SSL = get_config(\"mail_ssl\") or get_app_config(\"MAIL_SSL\")\n auth = get_config(\"mail_useauth\") or get_app_config(\"MAIL_USEAUTH\")\n\n if username:\n data[\"username\"] = username\n if password:\n data[\"password\"] = password\n if TLS:\n data[\"TLS\"] = TLS\n if SSL:\n data[\"SSL\"] = SSL\n if auth:\n data[\"auth\"] = auth\n\n try:\n smtp = get_smtp(**data)\n\n msg = EmailMessage()\n msg.set_content(text)\n\n msg[\"Subject\"] = subject\n msg[\"From\"] = mailfrom_addr\n msg[\"To\"] = addr\n\n smtp.send_message(msg)\n\n smtp.quit()\n return True, \"Email sent\"\n except smtplib.SMTPException as e:\n return False, str(e)\n except timeout:\n return False, \"SMTP server connection timed out\"\n except Exception as e:\n return False, str(e)\n", "path": "CTFd/utils/email/smtp.py"}, {"content": "import requests\n\nfrom CTFd.utils import get_app_config, get_config\n\n\ndef sendmail(addr, text, subject):\n ctf_name = get_config(\"ctf_name\")\n mailfrom_addr = get_config(\"mailfrom_addr\") or get_app_config(\"MAILFROM_ADDR\")\n mailfrom_addr = \"{} <{}>\".format(ctf_name, mailfrom_addr)\n\n mailgun_base_url = get_config(\"mailgun_base_url\") or get_app_config(\n \"MAILGUN_BASE_URL\"\n )\n mailgun_api_key = get_config(\"mailgun_api_key\") or get_app_config(\"MAILGUN_API_KEY\")\n try:\n r = requests.post(\n mailgun_base_url + \"/messages\",\n auth=(\"api\", mailgun_api_key),\n data={\n \"from\": mailfrom_addr,\n \"to\": [addr],\n \"subject\": subject,\n \"text\": text,\n },\n timeout=1.0,\n )\n except requests.RequestException as e:\n return (\n False,\n \"{error} exception occured while handling your request\".format(\n error=type(e).__name__\n ),\n )\n\n if r.status_code == 200:\n return True, \"Email sent\"\n else:\n return False, \"Mailgun settings are incorrect\"\n", "path": "CTFd/utils/email/mailgun.py"}]}
1,579
400
gh_patches_debug_29426
rasdani/github-patches
git_diff
jupyterhub__jupyterhub-1820
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Running jupyterhub upgrade-db with PostgreSQL database fails **How to reproduce the issue** Run `jupyterhub upgrade-db` with a PostgreSQL database to upgrade to 99a28a4418e1. **What you expected to happen** Successful schema update. **What actually happens** It fails with an sqlalchemy `ProgrammingError` message that originates here: https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/alembic/versions/99a28a4418e1_user_created.py#L40 in particular I think that should be `IS NOT NULL` not just `NOT NULL`. I substituted this live and it allowed the upgrade to proceed. **Share what version of JupyterHub you are using** Latest master. </issue> <code> [start of jupyterhub/alembic/versions/99a28a4418e1_user_created.py] 1 """user.created and spawner.started 2 3 Revision ID: 99a28a4418e1 4 Revises: 56cc5a70207e 5 Create Date: 2018-03-21 14:27:17.466841 6 7 """ 8 9 # revision identifiers, used by Alembic. 10 revision = '99a28a4418e1' 11 down_revision = '56cc5a70207e' 12 branch_labels = None 13 depends_on = None 14 15 16 from alembic import op 17 import sqlalchemy as sa 18 19 from datetime import datetime 20 21 def upgrade(): 22 op.add_column('users', sa.Column('created', sa.DateTime, nullable=True)) 23 c = op.get_bind() 24 # fill created date with current time 25 now = datetime.utcnow() 26 c.execute(""" 27 UPDATE users 28 SET created='%s' 29 """ % (now,) 30 ) 31 32 tables = c.engine.table_names() 33 34 if 'spawners' in tables: 35 op.add_column('spawners', sa.Column('started', sa.DateTime, nullable=True)) 36 # fill started value with now for running servers 37 c.execute(""" 38 UPDATE spawners 39 SET started='%s' 40 WHERE server_id NOT NULL 41 """ % (now,) 42 ) 43 44 45 def downgrade(): 46 op.drop_column('users', 'created') 47 op.drop_column('spawners', 'started') 48 [end of jupyterhub/alembic/versions/99a28a4418e1_user_created.py] [start of jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py] 1 """token tracking 2 3 Revision ID: 56cc5a70207e 4 Revises: 1cebaf56856c 5 Create Date: 2017-12-19 15:21:09.300513 6 7 """ 8 9 # revision identifiers, used by Alembic. 10 revision = '56cc5a70207e' 11 down_revision = '1cebaf56856c' 12 branch_labels = None 13 depends_on = None 14 15 from alembic import op 16 import sqlalchemy as sa 17 18 19 def upgrade(): 20 tables = op.get_bind().engine.table_names() 21 op.add_column('api_tokens', sa.Column('created', sa.DateTime(), nullable=True)) 22 op.add_column('api_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True)) 23 op.add_column('api_tokens', sa.Column('note', sa.Unicode(length=1023), nullable=True)) 24 if 'oauth_access_tokens' in tables: 25 op.add_column('oauth_access_tokens', sa.Column('created', sa.DateTime(), nullable=True)) 26 op.add_column('oauth_access_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True)) 27 op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE') 28 op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE') 29 30 31 def downgrade(): 32 op.drop_constraint(None, 'oauth_codes', type_='foreignkey') 33 op.drop_constraint(None, 'oauth_access_tokens', type_='foreignkey') 34 op.drop_column('oauth_access_tokens', 'last_activity') 35 op.drop_column('oauth_access_tokens', 'created') 36 op.drop_column('api_tokens', 'note') 37 op.drop_column('api_tokens', 'last_activity') 38 op.drop_column('api_tokens', 'created') 39 [end of jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py b/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py --- a/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py +++ b/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py @@ -15,6 +15,9 @@ from alembic import op import sqlalchemy as sa +import logging +logger = logging.getLogger('alembic') + def upgrade(): tables = op.get_bind().engine.table_names() @@ -24,8 +27,11 @@ if 'oauth_access_tokens' in tables: op.add_column('oauth_access_tokens', sa.Column('created', sa.DateTime(), nullable=True)) op.add_column('oauth_access_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True)) - op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE') - op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE') + if op.get_context().dialect.name == 'sqlite': + logger.warning("sqlite cannot use ALTER TABLE to create foreign keys. Upgrade will be incomplete.") + else: + op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE') + op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE') def downgrade(): diff --git a/jupyterhub/alembic/versions/99a28a4418e1_user_created.py b/jupyterhub/alembic/versions/99a28a4418e1_user_created.py --- a/jupyterhub/alembic/versions/99a28a4418e1_user_created.py +++ b/jupyterhub/alembic/versions/99a28a4418e1_user_created.py @@ -37,7 +37,7 @@ c.execute(""" UPDATE spawners SET started='%s' - WHERE server_id NOT NULL + WHERE server_id IS NOT NULL """ % (now,) )
{"golden_diff": "diff --git a/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py b/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py\n--- a/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py\n+++ b/jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py\n@@ -15,6 +15,9 @@\n from alembic import op\n import sqlalchemy as sa\n \n+import logging\n+logger = logging.getLogger('alembic')\n+\n \n def upgrade():\n tables = op.get_bind().engine.table_names()\n@@ -24,8 +27,11 @@\n if 'oauth_access_tokens' in tables:\n op.add_column('oauth_access_tokens', sa.Column('created', sa.DateTime(), nullable=True))\n op.add_column('oauth_access_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))\n- op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n- op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n+ if op.get_context().dialect.name == 'sqlite':\n+ logger.warning(\"sqlite cannot use ALTER TABLE to create foreign keys. Upgrade will be incomplete.\")\n+ else:\n+ op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n+ op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n \n \n def downgrade():\ndiff --git a/jupyterhub/alembic/versions/99a28a4418e1_user_created.py b/jupyterhub/alembic/versions/99a28a4418e1_user_created.py\n--- a/jupyterhub/alembic/versions/99a28a4418e1_user_created.py\n+++ b/jupyterhub/alembic/versions/99a28a4418e1_user_created.py\n@@ -37,7 +37,7 @@\n c.execute(\"\"\"\n UPDATE spawners\n SET started='%s'\n- WHERE server_id NOT NULL\n+ WHERE server_id IS NOT NULL\n \"\"\" % (now,)\n )\n", "issue": "Running jupyterhub upgrade-db with PostgreSQL database fails\n**How to reproduce the issue**\r\n\r\nRun `jupyterhub upgrade-db` with a PostgreSQL database to upgrade to 99a28a4418e1.\r\n\r\n**What you expected to happen**\r\n\r\nSuccessful schema update.\r\n\r\n**What actually happens**\r\n\r\nIt fails with an sqlalchemy `ProgrammingError` message that originates here:\r\n\r\nhttps://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/alembic/versions/99a28a4418e1_user_created.py#L40\r\n\r\nin particular I think that should be `IS NOT NULL` not just `NOT NULL`. I substituted this live and it allowed the upgrade to proceed.\r\n\r\n**Share what version of JupyterHub you are using**\r\n\r\nLatest master.\n", "before_files": [{"content": "\"\"\"user.created and spawner.started\n\nRevision ID: 99a28a4418e1\nRevises: 56cc5a70207e\nCreate Date: 2018-03-21 14:27:17.466841\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '99a28a4418e1'\ndown_revision = '56cc5a70207e'\nbranch_labels = None\ndepends_on = None\n\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom datetime import datetime\n\ndef upgrade():\n op.add_column('users', sa.Column('created', sa.DateTime, nullable=True))\n c = op.get_bind()\n # fill created date with current time\n now = datetime.utcnow()\n c.execute(\"\"\"\n UPDATE users\n SET created='%s'\n \"\"\" % (now,)\n )\n\n tables = c.engine.table_names()\n\n if 'spawners' in tables:\n op.add_column('spawners', sa.Column('started', sa.DateTime, nullable=True))\n # fill started value with now for running servers\n c.execute(\"\"\"\n UPDATE spawners\n SET started='%s'\n WHERE server_id NOT NULL\n \"\"\" % (now,)\n )\n\n\ndef downgrade():\n op.drop_column('users', 'created')\n op.drop_column('spawners', 'started')\n", "path": "jupyterhub/alembic/versions/99a28a4418e1_user_created.py"}, {"content": "\"\"\"token tracking\n\nRevision ID: 56cc5a70207e\nRevises: 1cebaf56856c\nCreate Date: 2017-12-19 15:21:09.300513\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '56cc5a70207e'\ndown_revision = '1cebaf56856c'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n tables = op.get_bind().engine.table_names()\n op.add_column('api_tokens', sa.Column('created', sa.DateTime(), nullable=True))\n op.add_column('api_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))\n op.add_column('api_tokens', sa.Column('note', sa.Unicode(length=1023), nullable=True))\n if 'oauth_access_tokens' in tables:\n op.add_column('oauth_access_tokens', sa.Column('created', sa.DateTime(), nullable=True))\n op.add_column('oauth_access_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True))\n op.create_foreign_key(None, 'oauth_access_tokens', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n op.create_foreign_key(None, 'oauth_codes', 'oauth_clients', ['client_id'], ['identifier'], ondelete='CASCADE')\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth_codes', type_='foreignkey')\n op.drop_constraint(None, 'oauth_access_tokens', type_='foreignkey')\n op.drop_column('oauth_access_tokens', 'last_activity')\n op.drop_column('oauth_access_tokens', 'created')\n op.drop_column('api_tokens', 'note')\n op.drop_column('api_tokens', 'last_activity')\n op.drop_column('api_tokens', 'created')\n", "path": "jupyterhub/alembic/versions/56cc5a70207e_token_tracking.py"}]}
1,693
548
gh_patches_debug_10663
rasdani/github-patches
git_diff
shuup__shuup-2095
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Xtheme: there is no warning for usaved changes when switching between plugins To reproduce: 1. Edit some content in Xtheme editor 2. Select another plugin without saving 3. See your changes to disappear There probably should be warning before switching plugins when you have unsaved information. </issue> <code> [start of shuup/campaigns/admin_module/forms/_basket.py] 1 # This file is part of Shuup. 2 # 3 # Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved. 4 # 5 # This source code is licensed under the OSL-3.0 license found in the 6 # LICENSE file in the root directory of this source tree. 7 from django import forms 8 from django.db.models import Q 9 from django.utils.translation import ugettext_lazy as _ 10 11 from shuup.admin.shop_provider import get_shop 12 from shuup.admin.supplier_provider import get_supplier 13 from shuup.campaigns.models import BasketCampaign, Coupon 14 15 from ._base import BaseCampaignForm, QuickAddCouponSelect 16 17 18 class BasketCampaignForm(BaseCampaignForm): 19 class Meta(BaseCampaignForm.Meta): 20 model = BasketCampaign 21 22 def __init__(self, *args, **kwargs): 23 super(BasketCampaignForm, self).__init__(*args, **kwargs) 24 25 coupons = Coupon.objects.filter( 26 Q(active=True, shop=get_shop(self.request)), 27 Q(campaign=None) | Q(campaign=self.instance), 28 ) 29 supplier = get_supplier(self.request) 30 if supplier: 31 coupons = coupons.filter(supplier=supplier) 32 33 coupon_code_choices = [('', '')] + list(coupons.values_list("pk", "code")) 34 field_kwargs = dict(choices=coupon_code_choices, required=False) 35 field_kwargs["help_text"] = _("Define the required coupon for this campaign.") 36 field_kwargs["label"] = _("Coupon") 37 field_kwargs["widget"] = QuickAddCouponSelect(editable_model="campaigns.Coupon") 38 if self.instance.pk and self.instance.coupon: 39 field_kwargs["initial"] = self.instance.coupon.pk 40 41 self.fields["coupon"] = forms.ChoiceField(**field_kwargs) 42 43 # the supplier will be, by default, the current one 44 if supplier: 45 self.fields["supplier"].widget = forms.HiddenInput() 46 47 def clean_coupon(self): 48 coupon = self.cleaned_data.get("coupon") 49 if coupon: 50 coupon = Coupon.objects.get(pk=coupon) 51 return coupon or None 52 53 def clean_supplier(self): 54 return self.cleaned_data.get("supplier") or get_supplier(self.request) 55 [end of shuup/campaigns/admin_module/forms/_basket.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shuup/campaigns/admin_module/forms/_basket.py b/shuup/campaigns/admin_module/forms/_basket.py --- a/shuup/campaigns/admin_module/forms/_basket.py +++ b/shuup/campaigns/admin_module/forms/_basket.py @@ -30,7 +30,7 @@ if supplier: coupons = coupons.filter(supplier=supplier) - coupon_code_choices = [('', '')] + list(coupons.values_list("pk", "code")) + coupon_code_choices = [('', '---------')] + list(coupons.values_list("pk", "code")) field_kwargs = dict(choices=coupon_code_choices, required=False) field_kwargs["help_text"] = _("Define the required coupon for this campaign.") field_kwargs["label"] = _("Coupon")
{"golden_diff": "diff --git a/shuup/campaigns/admin_module/forms/_basket.py b/shuup/campaigns/admin_module/forms/_basket.py\n--- a/shuup/campaigns/admin_module/forms/_basket.py\n+++ b/shuup/campaigns/admin_module/forms/_basket.py\n@@ -30,7 +30,7 @@\n if supplier:\n coupons = coupons.filter(supplier=supplier)\n \n- coupon_code_choices = [('', '')] + list(coupons.values_list(\"pk\", \"code\"))\n+ coupon_code_choices = [('', '---------')] + list(coupons.values_list(\"pk\", \"code\"))\n field_kwargs = dict(choices=coupon_code_choices, required=False)\n field_kwargs[\"help_text\"] = _(\"Define the required coupon for this campaign.\")\n field_kwargs[\"label\"] = _(\"Coupon\")\n", "issue": "Xtheme: there is no warning for usaved changes when switching between plugins\nTo reproduce:\r\n1. Edit some content in Xtheme editor\r\n2. Select another plugin without saving\r\n3. See your changes to disappear\r\n\r\nThere probably should be warning before switching plugins when you have unsaved information.\n", "before_files": [{"content": "# This file is part of Shuup.\n#\n# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom django import forms\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shuup.admin.shop_provider import get_shop\nfrom shuup.admin.supplier_provider import get_supplier\nfrom shuup.campaigns.models import BasketCampaign, Coupon\n\nfrom ._base import BaseCampaignForm, QuickAddCouponSelect\n\n\nclass BasketCampaignForm(BaseCampaignForm):\n class Meta(BaseCampaignForm.Meta):\n model = BasketCampaign\n\n def __init__(self, *args, **kwargs):\n super(BasketCampaignForm, self).__init__(*args, **kwargs)\n\n coupons = Coupon.objects.filter(\n Q(active=True, shop=get_shop(self.request)),\n Q(campaign=None) | Q(campaign=self.instance),\n )\n supplier = get_supplier(self.request)\n if supplier:\n coupons = coupons.filter(supplier=supplier)\n\n coupon_code_choices = [('', '')] + list(coupons.values_list(\"pk\", \"code\"))\n field_kwargs = dict(choices=coupon_code_choices, required=False)\n field_kwargs[\"help_text\"] = _(\"Define the required coupon for this campaign.\")\n field_kwargs[\"label\"] = _(\"Coupon\")\n field_kwargs[\"widget\"] = QuickAddCouponSelect(editable_model=\"campaigns.Coupon\")\n if self.instance.pk and self.instance.coupon:\n field_kwargs[\"initial\"] = self.instance.coupon.pk\n\n self.fields[\"coupon\"] = forms.ChoiceField(**field_kwargs)\n\n # the supplier will be, by default, the current one\n if supplier:\n self.fields[\"supplier\"].widget = forms.HiddenInput()\n\n def clean_coupon(self):\n coupon = self.cleaned_data.get(\"coupon\")\n if coupon:\n coupon = Coupon.objects.get(pk=coupon)\n return coupon or None\n\n def clean_supplier(self):\n return self.cleaned_data.get(\"supplier\") or get_supplier(self.request)\n", "path": "shuup/campaigns/admin_module/forms/_basket.py"}]}
1,177
176
gh_patches_debug_30949
rasdani/github-patches
git_diff
apache__airflow-24496
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> db+ string in result backend but not metadata secret ### Official Helm Chart version 1.1.0 (latest released) ### Apache Airflow version 2.1.3 (latest released) ### Kubernetes Version 1.21 ### Helm Chart configuration data: metadataSecretName: "airflow-metadata" resultBackendSecretName: "airflow-result-backend" ### Docker Image customisations _No response_ ### What happened If we only supply 1 secret with ``` connection: postgresql://airflow:[email protected]:5432/airflow?sslmode=disable ``` To use for both metadata and resultBackendConnection then we end up with a connection error because resultBackendConnection expects the string to be formatted like ``` connection: db+postgresql://airflow:[email protected]:5432/airflow?sslmode=disable ``` from what i can tell ### What you expected to happen I'd expect to be able to use the same secret for both using the same format if they are using the same connection. ### How to reproduce Make a secret structured like above to look like the metadataConnection auto-generated secret. use that same secret for the result backend. deploy. ### Anything else Occurs always. To get around currently we make 2 secrets one with just the db+ prepended. ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) </issue> <code> [start of airflow/config_templates/default_celery.py] 1 # 2 # Licensed to the Apache Software Foundation (ASF) under one 3 # or more contributor license agreements. See the NOTICE file 4 # distributed with this work for additional information 5 # regarding copyright ownership. The ASF licenses this file 6 # to you under the Apache License, Version 2.0 (the 7 # "License"); you may not use this file except in compliance 8 # with the License. You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, 13 # software distributed under the License is distributed on an 14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 # KIND, either express or implied. See the License for the 16 # specific language governing permissions and limitations 17 # under the License. 18 """Default celery configuration.""" 19 import logging 20 import ssl 21 22 from airflow.configuration import conf 23 from airflow.exceptions import AirflowConfigException, AirflowException 24 25 26 def _broker_supports_visibility_timeout(url): 27 return url.startswith("redis://") or url.startswith("sqs://") 28 29 30 log = logging.getLogger(__name__) 31 32 broker_url = conf.get('celery', 'BROKER_URL') 33 34 broker_transport_options = conf.getsection('celery_broker_transport_options') or {} 35 if 'visibility_timeout' not in broker_transport_options: 36 if _broker_supports_visibility_timeout(broker_url): 37 broker_transport_options['visibility_timeout'] = 21600 38 39 DEFAULT_CELERY_CONFIG = { 40 'accept_content': ['json'], 41 'event_serializer': 'json', 42 'worker_prefetch_multiplier': conf.getint('celery', 'worker_prefetch_multiplier'), 43 'task_acks_late': True, 44 'task_default_queue': conf.get('operators', 'DEFAULT_QUEUE'), 45 'task_default_exchange': conf.get('operators', 'DEFAULT_QUEUE'), 46 'task_track_started': conf.getboolean('celery', 'task_track_started'), 47 'broker_url': broker_url, 48 'broker_transport_options': broker_transport_options, 49 'result_backend': conf.get('celery', 'RESULT_BACKEND'), 50 'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'), 51 'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'), 52 } 53 54 celery_ssl_active = False 55 try: 56 celery_ssl_active = conf.getboolean('celery', 'SSL_ACTIVE') 57 except AirflowConfigException: 58 log.warning("Celery Executor will run without SSL") 59 60 try: 61 if celery_ssl_active: 62 if broker_url and 'amqp://' in broker_url: 63 broker_use_ssl = { 64 'keyfile': conf.get('celery', 'SSL_KEY'), 65 'certfile': conf.get('celery', 'SSL_CERT'), 66 'ca_certs': conf.get('celery', 'SSL_CACERT'), 67 'cert_reqs': ssl.CERT_REQUIRED, 68 } 69 elif broker_url and 'redis://' in broker_url: 70 broker_use_ssl = { 71 'ssl_keyfile': conf.get('celery', 'SSL_KEY'), 72 'ssl_certfile': conf.get('celery', 'SSL_CERT'), 73 'ssl_ca_certs': conf.get('celery', 'SSL_CACERT'), 74 'ssl_cert_reqs': ssl.CERT_REQUIRED, 75 } 76 else: 77 raise AirflowException( 78 'The broker you configured does not support SSL_ACTIVE to be True. ' 79 'Please use RabbitMQ or Redis if you would like to use SSL for broker.' 80 ) 81 82 DEFAULT_CELERY_CONFIG['broker_use_ssl'] = broker_use_ssl 83 except AirflowConfigException: 84 raise AirflowException( 85 'AirflowConfigException: SSL_ACTIVE is True, ' 86 'please ensure SSL_KEY, ' 87 'SSL_CERT and SSL_CACERT are set' 88 ) 89 except Exception as e: 90 raise AirflowException( 91 f'Exception: There was an unknown Celery SSL Error. Please ensure you want to use SSL and/or have ' 92 f'all necessary certs and key ({e}).' 93 ) 94 95 result_backend = str(DEFAULT_CELERY_CONFIG['result_backend']) 96 if 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend: 97 log.warning( 98 "You have configured a result_backend of %s, it is highly recommended " 99 "to use an alternative result_backend (i.e. a database).", 100 result_backend, 101 ) 102 [end of airflow/config_templates/default_celery.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/config_templates/default_celery.py b/airflow/config_templates/default_celery.py --- a/airflow/config_templates/default_celery.py +++ b/airflow/config_templates/default_celery.py @@ -36,6 +36,12 @@ if _broker_supports_visibility_timeout(broker_url): broker_transport_options['visibility_timeout'] = 21600 +if conf.has_option("celery", 'RESULT_BACKEND'): + result_backend = conf.get_mandatory_value('celery', 'RESULT_BACKEND') +else: + log.debug("Value for celery result_backend not found. Using sql_alchemy_conn with db+ prefix.") + result_backend = f'db+{conf.get("database", "SQL_ALCHEMY_CONN")}' + DEFAULT_CELERY_CONFIG = { 'accept_content': ['json'], 'event_serializer': 'json', @@ -46,7 +52,7 @@ 'task_track_started': conf.getboolean('celery', 'task_track_started'), 'broker_url': broker_url, 'broker_transport_options': broker_transport_options, - 'result_backend': conf.get('celery', 'RESULT_BACKEND'), + 'result_backend': result_backend, 'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'), 'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'), } @@ -92,7 +98,6 @@ f'all necessary certs and key ({e}).' ) -result_backend = str(DEFAULT_CELERY_CONFIG['result_backend']) if 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend: log.warning( "You have configured a result_backend of %s, it is highly recommended "
{"golden_diff": "diff --git a/airflow/config_templates/default_celery.py b/airflow/config_templates/default_celery.py\n--- a/airflow/config_templates/default_celery.py\n+++ b/airflow/config_templates/default_celery.py\n@@ -36,6 +36,12 @@\n if _broker_supports_visibility_timeout(broker_url):\n broker_transport_options['visibility_timeout'] = 21600\n \n+if conf.has_option(\"celery\", 'RESULT_BACKEND'):\n+ result_backend = conf.get_mandatory_value('celery', 'RESULT_BACKEND')\n+else:\n+ log.debug(\"Value for celery result_backend not found. Using sql_alchemy_conn with db+ prefix.\")\n+ result_backend = f'db+{conf.get(\"database\", \"SQL_ALCHEMY_CONN\")}'\n+\n DEFAULT_CELERY_CONFIG = {\n 'accept_content': ['json'],\n 'event_serializer': 'json',\n@@ -46,7 +52,7 @@\n 'task_track_started': conf.getboolean('celery', 'task_track_started'),\n 'broker_url': broker_url,\n 'broker_transport_options': broker_transport_options,\n- 'result_backend': conf.get('celery', 'RESULT_BACKEND'),\n+ 'result_backend': result_backend,\n 'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'),\n 'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'),\n }\n@@ -92,7 +98,6 @@\n f'all necessary certs and key ({e}).'\n )\n \n-result_backend = str(DEFAULT_CELERY_CONFIG['result_backend'])\n if 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend:\n log.warning(\n \"You have configured a result_backend of %s, it is highly recommended \"\n", "issue": "db+ string in result backend but not metadata secret\n### Official Helm Chart version\n\n1.1.0 (latest released)\n\n### Apache Airflow version\n\n2.1.3 (latest released)\n\n### Kubernetes Version\n\n1.21\n\n### Helm Chart configuration\n\n data:\r\n metadataSecretName: \"airflow-metadata\"\r\n resultBackendSecretName: \"airflow-result-backend\"\n\n### Docker Image customisations\n\n_No response_\n\n### What happened\n\nIf we only supply 1 secret with \r\n```\r\nconnection: postgresql://airflow:[email protected]:5432/airflow?sslmode=disable\r\n```\r\nTo use for both metadata and resultBackendConnection then we end up with a connection error because\r\nresultBackendConnection expects the string to be formatted like \r\n```\r\nconnection: db+postgresql://airflow:[email protected]:5432/airflow?sslmode=disable\r\n```\r\nfrom what i can tell\n\n### What you expected to happen\n\nI'd expect to be able to use the same secret for both using the same format if they are using the same connection. \n\n### How to reproduce\n\nMake a secret structured like above to look like the metadataConnection auto-generated secret.\r\nuse that same secret for the result backend.\r\ndeploy.\r\n\n\n### Anything else\n\nOccurs always. \r\nTo get around currently we make 2 secrets one with just the db+ prepended. \n\n### Are you willing to submit PR?\n\n- [ ] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default celery configuration.\"\"\"\nimport logging\nimport ssl\n\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException, AirflowException\n\n\ndef _broker_supports_visibility_timeout(url):\n return url.startswith(\"redis://\") or url.startswith(\"sqs://\")\n\n\nlog = logging.getLogger(__name__)\n\nbroker_url = conf.get('celery', 'BROKER_URL')\n\nbroker_transport_options = conf.getsection('celery_broker_transport_options') or {}\nif 'visibility_timeout' not in broker_transport_options:\n if _broker_supports_visibility_timeout(broker_url):\n broker_transport_options['visibility_timeout'] = 21600\n\nDEFAULT_CELERY_CONFIG = {\n 'accept_content': ['json'],\n 'event_serializer': 'json',\n 'worker_prefetch_multiplier': conf.getint('celery', 'worker_prefetch_multiplier'),\n 'task_acks_late': True,\n 'task_default_queue': conf.get('operators', 'DEFAULT_QUEUE'),\n 'task_default_exchange': conf.get('operators', 'DEFAULT_QUEUE'),\n 'task_track_started': conf.getboolean('celery', 'task_track_started'),\n 'broker_url': broker_url,\n 'broker_transport_options': broker_transport_options,\n 'result_backend': conf.get('celery', 'RESULT_BACKEND'),\n 'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'),\n 'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'),\n}\n\ncelery_ssl_active = False\ntry:\n celery_ssl_active = conf.getboolean('celery', 'SSL_ACTIVE')\nexcept AirflowConfigException:\n log.warning(\"Celery Executor will run without SSL\")\n\ntry:\n if celery_ssl_active:\n if broker_url and 'amqp://' in broker_url:\n broker_use_ssl = {\n 'keyfile': conf.get('celery', 'SSL_KEY'),\n 'certfile': conf.get('celery', 'SSL_CERT'),\n 'ca_certs': conf.get('celery', 'SSL_CACERT'),\n 'cert_reqs': ssl.CERT_REQUIRED,\n }\n elif broker_url and 'redis://' in broker_url:\n broker_use_ssl = {\n 'ssl_keyfile': conf.get('celery', 'SSL_KEY'),\n 'ssl_certfile': conf.get('celery', 'SSL_CERT'),\n 'ssl_ca_certs': conf.get('celery', 'SSL_CACERT'),\n 'ssl_cert_reqs': ssl.CERT_REQUIRED,\n }\n else:\n raise AirflowException(\n 'The broker you configured does not support SSL_ACTIVE to be True. '\n 'Please use RabbitMQ or Redis if you would like to use SSL for broker.'\n )\n\n DEFAULT_CELERY_CONFIG['broker_use_ssl'] = broker_use_ssl\nexcept AirflowConfigException:\n raise AirflowException(\n 'AirflowConfigException: SSL_ACTIVE is True, '\n 'please ensure SSL_KEY, '\n 'SSL_CERT and SSL_CACERT are set'\n )\nexcept Exception as e:\n raise AirflowException(\n f'Exception: There was an unknown Celery SSL Error. Please ensure you want to use SSL and/or have '\n f'all necessary certs and key ({e}).'\n )\n\nresult_backend = str(DEFAULT_CELERY_CONFIG['result_backend'])\nif 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend:\n log.warning(\n \"You have configured a result_backend of %s, it is highly recommended \"\n \"to use an alternative result_backend (i.e. a database).\",\n result_backend,\n )\n", "path": "airflow/config_templates/default_celery.py"}]}
2,037
394
gh_patches_debug_18040
rasdani/github-patches
git_diff
liqd__a4-meinberlin-1652
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> make it possible to detach plan from project now that a connection of a project with a plan is no longer obligatory, initiators need to be able to undo a connection. Once I click on a plan I can no longer NOT connect to a plan, which should be possible. ![image](https://user-images.githubusercontent.com/35491681/47439596-f041f000-d7ac-11e8-85f8-13407e97ada3.png) </issue> <code> [start of meinberlin/apps/plans/forms.py] 1 from django import forms 2 from django.conf import settings 3 from django.db.models import Q 4 from django.utils.translation import ugettext_lazy as _ 5 6 from adhocracy4.dashboard.components.forms import ProjectDashboardForm 7 from adhocracy4.maps import widgets as maps_widgets 8 from adhocracy4.projects import models as project_models 9 from meinberlin.apps.contrib import widgets as contrib_widgets 10 11 from . import models 12 13 14 def get_theme_options(): 15 return models.Plan.objects\ 16 .filter(~Q(theme=''))\ 17 .order_by('theme')\ 18 .values_list('theme', flat=True)\ 19 .distinct() 20 21 22 class PlanForm(forms.ModelForm): 23 24 class Meta: 25 model = models.Plan 26 fields = [ 27 'title', 28 'description_image', 29 'contact', 30 'point', 31 'point_label', 32 'district', 33 'cost', 34 'description', 35 'theme', 36 'status', 37 'participation'] 38 widgets = { 39 'point': maps_widgets.MapChoosePointWidget( 40 polygon=settings.BERLIN_POLYGON), 41 'theme': contrib_widgets.TextWithDatalistWidget(attrs={ 42 'options': get_theme_options 43 }) 44 } 45 error_messages = { 46 'point': { 47 'required': _('Please locate the plan on the map.') 48 } 49 } 50 51 def __init__(self, *args, **kwargs): 52 super().__init__(*args, **kwargs) 53 self.fields['district'].empty_label = _('City wide') 54 55 56 class CustomMultipleChoiceField(forms.ModelMultipleChoiceField): 57 58 widget = forms.RadioSelect 59 60 def clean(self, value): 61 if value is None: 62 return super().clean([]) 63 return super().clean([value]) 64 65 66 class ProjectPlansDashboardForm(ProjectDashboardForm): 67 plans = CustomMultipleChoiceField(queryset=None, 68 label=_('Plans')) 69 70 class Meta: 71 model = project_models.Project 72 fields = ['plans'] 73 required = False 74 75 def save(self, commit=False): 76 plans = self.cleaned_data['plans'] 77 self.instance.plans.set(plans) 78 79 def __init__(self, *args, **kwargs): 80 super().__init__(*args, **kwargs) 81 self.initial['plans'] = self.instance.plans.all() 82 self.fields['plans' 83 ].queryset = self.instance.organisation.plan_set.all() 84 [end of meinberlin/apps/plans/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py --- a/meinberlin/apps/plans/forms.py +++ b/meinberlin/apps/plans/forms.py @@ -55,10 +55,10 @@ class CustomMultipleChoiceField(forms.ModelMultipleChoiceField): - widget = forms.RadioSelect + widget = forms.Select def clean(self, value): - if value is None: + if not value: return super().clean([]) return super().clean([value]) @@ -79,5 +79,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.initial['plans'] = self.instance.plans.all() - self.fields['plans' - ].queryset = self.instance.organisation.plan_set.all() + self.fields['plans'].required = False + self.fields['plans'].empty_label = '----------' + self.fields['plans'].queryset = \ + self.instance.organisation.plan_set.all()
{"golden_diff": "diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py\n--- a/meinberlin/apps/plans/forms.py\n+++ b/meinberlin/apps/plans/forms.py\n@@ -55,10 +55,10 @@\n \n class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n \n- widget = forms.RadioSelect\n+ widget = forms.Select\n \n def clean(self, value):\n- if value is None:\n+ if not value:\n return super().clean([])\n return super().clean([value])\n \n@@ -79,5 +79,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n- self.fields['plans'\n- ].queryset = self.instance.organisation.plan_set.all()\n+ self.fields['plans'].required = False\n+ self.fields['plans'].empty_label = '----------'\n+ self.fields['plans'].queryset = \\\n+ self.instance.organisation.plan_set.all()\n", "issue": "make it possible to detach plan from project\nnow that a connection of a project with a plan is no longer obligatory, initiators need to be able to undo a connection. Once I click on a plan I can no longer NOT connect to a plan, which should be possible.\r\n\r\n![image](https://user-images.githubusercontent.com/35491681/47439596-f041f000-d7ac-11e8-85f8-13407e97ada3.png)\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.components.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.contrib import widgets as contrib_widgets\n\nfrom . import models\n\n\ndef get_theme_options():\n return models.Plan.objects\\\n .filter(~Q(theme=''))\\\n .order_by('theme')\\\n .values_list('theme', flat=True)\\\n .distinct()\n\n\nclass PlanForm(forms.ModelForm):\n\n class Meta:\n model = models.Plan\n fields = [\n 'title',\n 'description_image',\n 'contact',\n 'point',\n 'point_label',\n 'district',\n 'cost',\n 'description',\n 'theme',\n 'status',\n 'participation']\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON),\n 'theme': contrib_widgets.TextWithDatalistWidget(attrs={\n 'options': get_theme_options\n })\n }\n error_messages = {\n 'point': {\n 'required': _('Please locate the plan on the map.')\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n\n\nclass CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n widget = forms.RadioSelect\n\n def clean(self, value):\n if value is None:\n return super().clean([])\n return super().clean([value])\n\n\nclass ProjectPlansDashboardForm(ProjectDashboardForm):\n plans = CustomMultipleChoiceField(queryset=None,\n label=_('Plans'))\n\n class Meta:\n model = project_models.Project\n fields = ['plans']\n required = False\n\n def save(self, commit=False):\n plans = self.cleaned_data['plans']\n self.instance.plans.set(plans)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n self.fields['plans'\n ].queryset = self.instance.organisation.plan_set.all()\n", "path": "meinberlin/apps/plans/forms.py"}]}
1,317
241
gh_patches_debug_1022
rasdani/github-patches
git_diff
searxng__searxng-437
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: microsoft academic engine **Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG** Repository: https://github.com/tiekoetter/searxng Branch: master Version: 1.0.0-972-93548243 <!-- Check if these values are correct --> **How did you install SearXNG?** <!-- Did you install SearXNG using the official wiki or using searxng-docker or manually by executing the searx/webapp.py file? --> **What happened?** <!-- A clear and concise description of what the bug is. --> **How To Reproduce** <!-- How can we reproduce this issue? (as minimally and as precisely as possible) --> **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> **Screenshots & Logs** <!-- If applicable, add screenshots, logs to help explain your problem. --> **Additional context** <!-- Add any other context about the problem here. --> **Technical report** Error * Error: httpx.TimeoutException * Percentage: 50 * Parameters: `(None, None, None)` * File name: `searx/search/processors/online.py:97` * Function: `_send_http_request` * Code: `response = req(params['url'], **request_args)` </issue> <code> [start of searx/engines/microsoft_academic.py] 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 """ 3 Microsoft Academic (Science) 4 """ 5 6 from json import dumps, loads 7 from searx.utils import html_to_text 8 9 # about 10 about = { 11 "website": 'https://academic.microsoft.com', 12 "wikidata_id": 'Q28136779', 13 "official_api_documentation": 'http://ma-graph.org/', 14 "use_official_api": False, 15 "require_api_key": False, 16 "results": 'JSON', 17 } 18 19 categories = ['images'] 20 paging = True 21 search_url = 'https://academic.microsoft.com/api/search' 22 _paper_url = 'https://academic.microsoft.com/paper/{id}/reference' 23 24 25 def request(query, params): 26 params['url'] = search_url 27 params['method'] = 'POST' 28 params['headers']['content-type'] = 'application/json; charset=utf-8' 29 params['data'] = dumps({ 30 'query': query, 31 'queryExpression': '', 32 'filters': [], 33 'orderBy': 0, 34 'skip': (params['pageno'] - 1) * 10, 35 'sortAscending': True, 36 'take': 10, 37 'includeCitationContexts': False, 38 'profileId': '', 39 }) 40 41 return params 42 43 44 def response(resp): 45 results = [] 46 response_data = loads(resp.text) 47 if not response_data: 48 return results 49 50 for result in response_data['pr']: 51 if 'dn' not in result['paper']: 52 continue 53 54 title = result['paper']['dn'] 55 content = _get_content(result['paper']) 56 url = _paper_url.format(id=result['paper']['id']) 57 results.append({ 58 'url': url, 59 'title': html_to_text(title), 60 'content': html_to_text(content), 61 }) 62 63 return results 64 65 66 def _get_content(result): 67 if 'd' in result: 68 content = result['d'] 69 if len(content) > 300: 70 return content[:300] + '...' 71 return content 72 73 return '' 74 [end of searx/engines/microsoft_academic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py --- a/searx/engines/microsoft_academic.py +++ b/searx/engines/microsoft_academic.py @@ -47,7 +47,7 @@ if not response_data: return results - for result in response_data['pr']: + for result in response_data.get('pr', {}): if 'dn' not in result['paper']: continue
{"golden_diff": "diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py\n--- a/searx/engines/microsoft_academic.py\n+++ b/searx/engines/microsoft_academic.py\n@@ -47,7 +47,7 @@\n if not response_data:\n return results\n \n- for result in response_data['pr']:\n+ for result in response_data.get('pr', {}):\n if 'dn' not in result['paper']:\n continue\n", "issue": "Bug: microsoft academic engine\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/tiekoetter/searxng\r\nBranch: master\r\nVersion: 1.0.0-972-93548243\r\n<!-- Check if these values are correct -->\r\n\r\n**How did you install SearXNG?**\r\n<!-- Did you install SearXNG using the official wiki or using searxng-docker\r\nor manually by executing the searx/webapp.py file? -->\r\n**What happened?**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n**How To Reproduce**\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: httpx.TimeoutException\r\n * Percentage: 50\r\n * Parameters: `(None, None, None)`\r\n * File name: `searx/search/processors/online.py:97`\r\n * Function: `_send_http_request`\r\n * Code: `response = req(params['url'], **request_args)`\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"\n Microsoft Academic (Science)\n\"\"\"\n\nfrom json import dumps, loads\nfrom searx.utils import html_to_text\n\n# about\nabout = {\n \"website\": 'https://academic.microsoft.com',\n \"wikidata_id\": 'Q28136779',\n \"official_api_documentation\": 'http://ma-graph.org/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\ncategories = ['images']\npaging = True\nsearch_url = 'https://academic.microsoft.com/api/search'\n_paper_url = 'https://academic.microsoft.com/paper/{id}/reference'\n\n\ndef request(query, params):\n params['url'] = search_url\n params['method'] = 'POST'\n params['headers']['content-type'] = 'application/json; charset=utf-8'\n params['data'] = dumps({\n 'query': query,\n 'queryExpression': '',\n 'filters': [],\n 'orderBy': 0,\n 'skip': (params['pageno'] - 1) * 10,\n 'sortAscending': True,\n 'take': 10,\n 'includeCitationContexts': False,\n 'profileId': '',\n })\n\n return params\n\n\ndef response(resp):\n results = []\n response_data = loads(resp.text)\n if not response_data:\n return results\n\n for result in response_data['pr']:\n if 'dn' not in result['paper']:\n continue\n\n title = result['paper']['dn']\n content = _get_content(result['paper'])\n url = _paper_url.format(id=result['paper']['id'])\n results.append({\n 'url': url,\n 'title': html_to_text(title),\n 'content': html_to_text(content),\n })\n\n return results\n\n\ndef _get_content(result):\n if 'd' in result:\n content = result['d']\n if len(content) > 300:\n return content[:300] + '...'\n return content\n\n return ''\n", "path": "searx/engines/microsoft_academic.py"}]}
1,458
116
gh_patches_debug_552
rasdani/github-patches
git_diff
pex-tool__pex-880
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.1 On the docket: + [x] PEX 2.1.0 regression: pex file won't build inside a running docker image as user #850 + [x] Fully pin vendored requirements. #853 + [x] Fix `tox -epackage` to create pex supporting 3.8. #843 + [x] Pex erroneously warns about needing to use vendored `pkg_resources` for distributions with empty `namespace_packages.txt` metadata files. #840 + [x] Interpreter discovery and pyenv don't interact well #782 + [x] ensure_python_interpreter() bootstrapping broken on pypy shard #477 + [x] Resolve error checking does not account for environment markers. #851 + [x] Ensure Pex PEX contraints match pex wheel / sdist. #863 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '2.1.0' 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '2.1.0' +__version__ = '2.1.1'
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.0'\n+__version__ = '2.1.1'\n", "issue": "Release 2.1.1\nOn the docket:\r\n+ [x] PEX 2.1.0 regression: pex file won't build inside a running docker image as user #850\r\n+ [x] Fully pin vendored requirements. #853\r\n+ [x] Fix `tox -epackage` to create pex supporting 3.8. #843\r\n+ [x] Pex erroneously warns about needing to use vendored `pkg_resources` for distributions with empty `namespace_packages.txt` metadata files. #840\r\n+ [x] Interpreter discovery and pyenv don't interact well #782\r\n+ [x] ensure_python_interpreter() bootstrapping broken on pypy shard #477\r\n+ [x] Resolve error checking does not account for environment markers. #851\r\n+ [x] Ensure Pex PEX contraints match pex wheel / sdist. #863\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.0'\n", "path": "pex/version.py"}]}
791
95
gh_patches_debug_24
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-2007
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG]: ModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer' ### 🐛 Describe the bug I install colossalAI with the command `pip install colossalai==0.1.11rc3+torch1.12cu11.3 -f https://release.colossalai.org` But I get an error when follow https://github.com/hpcaitech/ColossalAI/tree/main/examples/tutorial#-run-opt-finetuning-and-inference, I just run `bash ./run_clm_synthetic.sh` and get an error as follows: ```shell ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /home/he.yan/ColossalAI/examples/tutorial/opt/opt/run_clm.py:46 in <module> │ │ │ │ 43 from colossalai.core import global_context as gpc │ │ 44 from colossalai.logging import disable_existing_loggers, get_dist_logger │ │ 45 from colossalai.nn.optimizer import HybridAdam │ │ ❱ 46 from colossalai.nn.optimizer.zero_optimizer import ZeroOptimizer │ │ 47 from colossalai.nn.parallel import ZeroDDP │ │ 48 from colossalai.tensor import ProcessGroup │ │ 49 from colossalai.utils import get_current_device, get_dataloader │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ ModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer' ``` ### Environment Python 3.8.15 torch1.12cu11.3 </issue> <code> [start of colossalai/__init__.py] 1 from .initialize import ( 2 get_default_parser, 3 initialize, 4 launch, 5 launch_from_openmpi, 6 launch_from_slurm, 7 launch_from_torch, 8 ) 9 10 __version__ = '0.1.11rc2' 11 [end of colossalai/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/colossalai/__init__.py b/colossalai/__init__.py --- a/colossalai/__init__.py +++ b/colossalai/__init__.py @@ -7,4 +7,4 @@ launch_from_torch, ) -__version__ = '0.1.11rc2' +__version__ = '0.1.11rc4'
{"golden_diff": "diff --git a/colossalai/__init__.py b/colossalai/__init__.py\n--- a/colossalai/__init__.py\n+++ b/colossalai/__init__.py\n@@ -7,4 +7,4 @@\n launch_from_torch,\n )\n \n-__version__ = '0.1.11rc2'\n+__version__ = '0.1.11rc4'\n", "issue": "[BUG]: ModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer'\n### \ud83d\udc1b Describe the bug\r\n\r\nI install colossalAI with the command `pip install colossalai==0.1.11rc3+torch1.12cu11.3 -f https://release.colossalai.org`\r\nBut I get an error when follow https://github.com/hpcaitech/ColossalAI/tree/main/examples/tutorial#-run-opt-finetuning-and-inference, I just run `bash ./run_clm_synthetic.sh` and get an error as follows:\r\n\r\n```shell\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Traceback (most recent call last) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 /home/he.yan/ColossalAI/examples/tutorial/opt/opt/run_clm.py:46 in <module> \u2502\r\n\u2502 \u2502\r\n\u2502 43 from colossalai.core import global_context as gpc \u2502\r\n\u2502 44 from colossalai.logging import disable_existing_loggers, get_dist_logger \u2502\r\n\u2502 45 from colossalai.nn.optimizer import HybridAdam \u2502\r\n\u2502 \u2771 46 from colossalai.nn.optimizer.zero_optimizer import ZeroOptimizer \u2502\r\n\u2502 47 from colossalai.nn.parallel import ZeroDDP \u2502\r\n\u2502 48 from colossalai.tensor import ProcessGroup \u2502\r\n\u2502 49 from colossalai.utils import get_current_device, get_dataloader \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\nModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer'\r\n```\r\n\r\n### Environment\r\n\r\nPython 3.8.15\r\ntorch1.12cu11.3\n", "before_files": [{"content": "from .initialize import (\n get_default_parser,\n initialize,\n launch,\n launch_from_openmpi,\n launch_from_slurm,\n launch_from_torch,\n)\n\n__version__ = '0.1.11rc2'\n", "path": "colossalai/__init__.py"}]}
967
93
gh_patches_debug_2151
rasdani/github-patches
git_diff
WeblateOrg__weblate-4665
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> migrations fail for database name containing "-" **Describe the bug** Applying memory.0007_use_trigram...Traceback (most recent call last): File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute return self.cursor.execute(sql, params) psycopg2.errors.SyntaxError: syntax error at or near "-" LINE 1: ALTER DATABASE weblate-staging SET pg_trgm.similarity_thresh... ^ **To Reproduce** Set the database name to "weblate-staging" I worked around this by changing of ALTER DATABASE {} SET to ALTER DATABASE \"{}\" SET in 0007_use_trigram.py and 0008_adjust_similarity.py. weblate-4.1.1 </issue> <code> [start of weblate/memory/migrations/0008_adjust_similarity.py] 1 # Generated by Django 3.0.5 on 2020-05-12 11:44 2 3 from django.db import migrations 4 5 6 def update_index(apps, schema_editor): 7 if schema_editor.connection.vendor != "postgresql": 8 return 9 # This ensures that extensions are loaded into the session. Without that 10 # the next ALTER database fails unless we're running as superuser (which 11 # is allowed to set non existing parameters, so missing extension doesn't 12 # matter) 13 # See https://www.postgresql.org/message-id/6376.1533675236%40sss.pgh.pa.us 14 schema_editor.execute("SELECT show_limit()") 15 16 schema_editor.execute( 17 "ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5".format( 18 schema_editor.connection.settings_dict["USER"] 19 ) 20 ) 21 22 23 class Migration(migrations.Migration): 24 25 dependencies = [ 26 ("memory", "0007_use_trigram"), 27 ] 28 29 operations = [ 30 migrations.RunPython( 31 update_index, migrations.RunPython.noop, elidable=False, atomic=False 32 ) 33 ] 34 [end of weblate/memory/migrations/0008_adjust_similarity.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/weblate/memory/migrations/0008_adjust_similarity.py b/weblate/memory/migrations/0008_adjust_similarity.py --- a/weblate/memory/migrations/0008_adjust_similarity.py +++ b/weblate/memory/migrations/0008_adjust_similarity.py @@ -15,7 +15,7 @@ schema_editor.execute( "ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5".format( - schema_editor.connection.settings_dict["USER"] + schema_editor.quote_name(schema_editor.connection.settings_dict["USER"]) ) )
{"golden_diff": "diff --git a/weblate/memory/migrations/0008_adjust_similarity.py b/weblate/memory/migrations/0008_adjust_similarity.py\n--- a/weblate/memory/migrations/0008_adjust_similarity.py\n+++ b/weblate/memory/migrations/0008_adjust_similarity.py\n@@ -15,7 +15,7 @@\n \n schema_editor.execute(\n \"ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5\".format(\n- schema_editor.connection.settings_dict[\"USER\"]\n+ schema_editor.quote_name(schema_editor.connection.settings_dict[\"USER\"])\n )\n )\n", "issue": "migrations fail for database name containing \"-\"\n**Describe the bug**\r\n Applying memory.0007_use_trigram...Traceback (most recent call last):\r\n File \"/usr/lib/python3.6/site-packages/django/db/backends/utils.py\", line 84, in _execute\r\n return self.cursor.execute(sql, params)\r\npsycopg2.errors.SyntaxError: syntax error at or near \"-\"\r\nLINE 1: ALTER DATABASE weblate-staging SET pg_trgm.similarity_thresh...\r\n ^\r\n**To Reproduce**\r\nSet the database name to \"weblate-staging\"\r\n\r\nI worked around this by changing of\r\nALTER DATABASE {} SET\r\nto\r\nALTER DATABASE \\\"{}\\\" SET\r\nin 0007_use_trigram.py and 0008_adjust_similarity.py.\r\n\r\nweblate-4.1.1\n", "before_files": [{"content": "# Generated by Django 3.0.5 on 2020-05-12 11:44\n\nfrom django.db import migrations\n\n\ndef update_index(apps, schema_editor):\n if schema_editor.connection.vendor != \"postgresql\":\n return\n # This ensures that extensions are loaded into the session. Without that\n # the next ALTER database fails unless we're running as superuser (which\n # is allowed to set non existing parameters, so missing extension doesn't\n # matter)\n # See https://www.postgresql.org/message-id/6376.1533675236%40sss.pgh.pa.us\n schema_editor.execute(\"SELECT show_limit()\")\n\n schema_editor.execute(\n \"ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5\".format(\n schema_editor.connection.settings_dict[\"USER\"]\n )\n )\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"memory\", \"0007_use_trigram\"),\n ]\n\n operations = [\n migrations.RunPython(\n update_index, migrations.RunPython.noop, elidable=False, atomic=False\n )\n ]\n", "path": "weblate/memory/migrations/0008_adjust_similarity.py"}]}
1,044
135
gh_patches_debug_2298
rasdani/github-patches
git_diff
iterative__dvc-1436
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> repro: --all-pipelines doesn't do anything. </issue> <code> [start of dvc/command/repro.py] 1 import os 2 3 from dvc.command.base import CmdBase 4 from dvc.command.status import CmdDataStatus 5 from dvc.exceptions import DvcException 6 7 8 class CmdRepro(CmdBase): 9 def run(self): 10 recursive = not self.args.single_item 11 saved_dir = os.path.realpath(os.curdir) 12 if self.args.cwd: 13 os.chdir(self.args.cwd) 14 15 ret = 0 16 for target in self.args.targets: 17 try: 18 stages = self.project.reproduce( 19 target, 20 recursive=recursive, 21 force=self.args.force, 22 dry=self.args.dry, 23 interactive=self.args.interactive, 24 pipeline=self.args.pipeline, 25 all_pipelines=self.args.all_pipelines, 26 ignore_build_cache=self.args.ignore_build_cache) 27 28 if len(stages) == 0: 29 self.project.logger.info(CmdDataStatus.UP_TO_DATE_MSG) 30 31 if self.args.metrics: 32 self.project.metrics_show() 33 except DvcException as ex: 34 msg = 'Failed to reproduce \'{}\''.format(target) 35 self.project.logger.error(msg, ex) 36 ret = 1 37 break 38 39 os.chdir(saved_dir) 40 return ret 41 [end of dvc/command/repro.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/command/repro.py b/dvc/command/repro.py --- a/dvc/command/repro.py +++ b/dvc/command/repro.py @@ -12,6 +12,10 @@ if self.args.cwd: os.chdir(self.args.cwd) + # Dirty hack so the for loop below can at least enter once + if self.args.all_pipelines: + self.args.targets = [None] + ret = 0 for target in self.args.targets: try:
{"golden_diff": "diff --git a/dvc/command/repro.py b/dvc/command/repro.py\n--- a/dvc/command/repro.py\n+++ b/dvc/command/repro.py\n@@ -12,6 +12,10 @@\n if self.args.cwd:\n os.chdir(self.args.cwd)\n \n+ # Dirty hack so the for loop below can at least enter once\n+ if self.args.all_pipelines:\n+ self.args.targets = [None]\n+\n ret = 0\n for target in self.args.targets:\n try:\n", "issue": "repro: --all-pipelines doesn't do anything.\n\n", "before_files": [{"content": "import os\n\nfrom dvc.command.base import CmdBase\nfrom dvc.command.status import CmdDataStatus\nfrom dvc.exceptions import DvcException\n\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n saved_dir = os.path.realpath(os.curdir)\n if self.args.cwd:\n os.chdir(self.args.cwd)\n\n ret = 0\n for target in self.args.targets:\n try:\n stages = self.project.reproduce(\n target,\n recursive=recursive,\n force=self.args.force,\n dry=self.args.dry,\n interactive=self.args.interactive,\n pipeline=self.args.pipeline,\n all_pipelines=self.args.all_pipelines,\n ignore_build_cache=self.args.ignore_build_cache)\n\n if len(stages) == 0:\n self.project.logger.info(CmdDataStatus.UP_TO_DATE_MSG)\n\n if self.args.metrics:\n self.project.metrics_show()\n except DvcException as ex:\n msg = 'Failed to reproduce \\'{}\\''.format(target)\n self.project.logger.error(msg, ex)\n ret = 1\n break\n\n os.chdir(saved_dir)\n return ret\n", "path": "dvc/command/repro.py"}]}
872
116
gh_patches_debug_3798
rasdani/github-patches
git_diff
Parsl__parsl-618
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error building docs with new env ``` Running Sphinx v1.8.1 loading pickled environment... failed: build environment version not current Theme error: sphinx_rtd_theme is no longer a hard dependency since version 1.4.0. Please install it manually.(pip install sphinx_rtd_theme) ``` </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 3 with open('parsl/version.py') as f: 4 exec(f.read()) 5 6 with open('requirements.txt') as f: 7 install_requires = f.readlines() 8 9 setup( 10 name='parsl', 11 version=VERSION, 12 description='Simple data dependent workflows in Python', 13 long_description='Simple parallel workflows system for Python', 14 url='https://github.com/Parsl/parsl', 15 author='The Parsl Team', 16 author_email='[email protected]', 17 license='Apache 2.0', 18 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION), 19 package_data={'': ['LICENSE']}, 20 packages=find_packages(), 21 install_requires=install_requires, 22 scripts = ['parsl/executors/high_throughput/process_worker_pool.py', 23 'parsl/executors/extreme_scale/mpi_worker_pool.py'], 24 extras_require = { 25 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'], 26 'aws' : ['boto3'], 27 'jetstream' : ['python-novaclient'], 28 'extreme_scale' : ['mpi4py'], 29 'docs' : ['nbsphinx'], 30 'google_cloud' : ['google-auth', 'google-api-python-client'] 31 }, 32 classifiers = [ 33 # Maturity 34 'Development Status :: 3 - Alpha', 35 # Intended audience 36 'Intended Audience :: Developers', 37 # Licence, must match with licence above 38 'License :: OSI Approved :: Apache Software License', 39 # Python versions supported 40 'Programming Language :: Python :: 3.5', 41 'Programming Language :: Python :: 3.6', 42 ], 43 keywords=['Workflows', 'Scientific computing'], 44 ) 45 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -26,7 +26,7 @@ 'aws' : ['boto3'], 'jetstream' : ['python-novaclient'], 'extreme_scale' : ['mpi4py'], - 'docs' : ['nbsphinx'], + 'docs' : ['nbsphinx', 'sphinx_rtd_theme'], 'google_cloud' : ['google-auth', 'google-api-python-client'] }, classifiers = [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n 'aws' : ['boto3'],\n 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n- 'docs' : ['nbsphinx'],\n+ 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client']\n },\n classifiers = [\n", "issue": "Error building docs with new env\n\r\n```\r\nRunning Sphinx v1.8.1\r\nloading pickled environment... failed: build environment version not current\r\n\r\nTheme error:\r\nsphinx_rtd_theme is no longer a hard dependency since version 1.4.0. Please install it manually.(pip install sphinx_rtd_theme)\r\n```\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py'],\n extras_require = {\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx'],\n 'google_cloud' : ['google-auth', 'google-api-python-client']\n },\n classifiers = [\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py"}]}
1,060
118
gh_patches_debug_28357
rasdani/github-patches
git_diff
pyca__cryptography-4200
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Some OpenSSL bignum arithmetic operations are not in constant time ### What's wrong: 1. Some arithmetic operations exposed in the [OpenSSL bignum binding](https://github.com/pyca/cryptography/blob/master/src/_cffi_src/openssl/bignum.py) are not in constant time. 2. These functions are specifically: 1. `BN_div` 2. `BN_mod_inverse` 3. `BN_mod_exp` ### How to resolve: 1. The OpenSSL solution to this is to call `BN_set_flags` on the secret BIGNUM and set the `BN_FLG_CONSTTIME` flag. [The OpenSSL docs reference this here.](https://github.com/openssl/openssl/blob/master/include/openssl/bn.h#L61) 2. Expose the needed operations: 1. `BN_MONT_CTX` operations. 2. `BN_mod_exp_mont`, and `BN_mod_exp_mont_consttime` 3. `BN_set_flags`, `BN_get_flags`, and the `BN_FLG_CONSTTIME` flag. ### Notes: 1. Cryptography.io isn't affected by these timing issues. I have clarified with @alex and @reaperhulk. 2. If you are a downstream user utilizing these operations on private/secret values, then _you are affected_. </issue> <code> [start of src/_cffi_src/openssl/bignum.py] 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 INCLUDES = """ 8 #include <openssl/bn.h> 9 """ 10 11 TYPES = """ 12 typedef ... BN_CTX; 13 typedef ... BIGNUM; 14 typedef int... BN_ULONG; 15 """ 16 17 FUNCTIONS = """ 18 BIGNUM *BN_new(void); 19 void BN_free(BIGNUM *); 20 void BN_clear_free(BIGNUM *); 21 22 int BN_rand(BIGNUM *, int, int, int); 23 int BN_rand_range(BIGNUM *, BIGNUM *); 24 25 BN_CTX *BN_CTX_new(void); 26 void BN_CTX_free(BN_CTX *); 27 28 void BN_CTX_start(BN_CTX *); 29 BIGNUM *BN_CTX_get(BN_CTX *); 30 void BN_CTX_end(BN_CTX *); 31 32 BIGNUM *BN_copy(BIGNUM *, const BIGNUM *); 33 BIGNUM *BN_dup(const BIGNUM *); 34 35 int BN_set_word(BIGNUM *, BN_ULONG); 36 BN_ULONG BN_get_word(const BIGNUM *); 37 38 const BIGNUM *BN_value_one(void); 39 40 char *BN_bn2hex(const BIGNUM *); 41 int BN_hex2bn(BIGNUM **, const char *); 42 int BN_dec2bn(BIGNUM **, const char *); 43 44 int BN_bn2bin(const BIGNUM *, unsigned char *); 45 BIGNUM *BN_bin2bn(const unsigned char *, int, BIGNUM *); 46 47 int BN_num_bits(const BIGNUM *); 48 49 int BN_cmp(const BIGNUM *, const BIGNUM *); 50 int BN_add(BIGNUM *, const BIGNUM *, const BIGNUM *); 51 int BN_sub(BIGNUM *, const BIGNUM *, const BIGNUM *); 52 int BN_mul(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); 53 int BN_sqr(BIGNUM *, const BIGNUM *, BN_CTX *); 54 int BN_div(BIGNUM *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); 55 int BN_nnmod(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); 56 int BN_mod_add(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, 57 BN_CTX *); 58 int BN_mod_sub(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, 59 BN_CTX *); 60 int BN_mod_mul(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, 61 BN_CTX *); 62 int BN_mod_sqr(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); 63 int BN_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); 64 int BN_mod_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, 65 BN_CTX *); 66 int BN_gcd(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); 67 BIGNUM *BN_mod_inverse(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); 68 69 int BN_set_bit(BIGNUM *, int); 70 int BN_clear_bit(BIGNUM *, int); 71 72 int BN_is_bit_set(const BIGNUM *, int); 73 74 int BN_mask_bits(BIGNUM *, int); 75 76 int BN_num_bytes(const BIGNUM *); 77 78 int BN_zero(BIGNUM *); 79 int BN_one(BIGNUM *); 80 int BN_mod(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); 81 82 int BN_lshift(BIGNUM *, const BIGNUM *, int); 83 int BN_lshift1(BIGNUM *, BIGNUM *); 84 85 int BN_rshift(BIGNUM *, BIGNUM *, int); 86 int BN_rshift1(BIGNUM *, BIGNUM *); 87 """ 88 89 CUSTOMIZATIONS = """ 90 """ 91 [end of src/_cffi_src/openssl/bignum.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/_cffi_src/openssl/bignum.py b/src/_cffi_src/openssl/bignum.py --- a/src/_cffi_src/openssl/bignum.py +++ b/src/_cffi_src/openssl/bignum.py @@ -10,11 +10,17 @@ TYPES = """ typedef ... BN_CTX; +typedef ... BN_MONT_CTX; typedef ... BIGNUM; typedef int... BN_ULONG; """ FUNCTIONS = """ +#define BN_FLG_CONSTTIME ... + +void BN_set_flags(BIGNUM *, int); +int BN_get_flags(const BIGNUM *, int); + BIGNUM *BN_new(void); void BN_free(BIGNUM *); void BN_clear_free(BIGNUM *); @@ -29,6 +35,10 @@ BIGNUM *BN_CTX_get(BN_CTX *); void BN_CTX_end(BN_CTX *); +BN_MONT_CTX *BN_MONT_CTX_new(void); +int BN_MONT_CTX_set(BN_MONT_CTX *, BIGNUM *, BN_CTX *); +void BN_MONT_CTX_free(BN_MONT_CTX *); + BIGNUM *BN_copy(BIGNUM *, const BIGNUM *); BIGNUM *BN_dup(const BIGNUM *); @@ -63,6 +73,10 @@ int BN_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); int BN_mod_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); +int BN_mod_exp_mont(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, + BN_CTX *, BN_MONT_CTX *); +int BN_mod_exp_mont_consttime(BIGNUM *, const BIGNUM *, const BIGNUM *, + const BIGNUM *, BN_CTX *, BN_MONT_CTX *); int BN_gcd(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); BIGNUM *BN_mod_inverse(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
{"golden_diff": "diff --git a/src/_cffi_src/openssl/bignum.py b/src/_cffi_src/openssl/bignum.py\n--- a/src/_cffi_src/openssl/bignum.py\n+++ b/src/_cffi_src/openssl/bignum.py\n@@ -10,11 +10,17 @@\n \n TYPES = \"\"\"\n typedef ... BN_CTX;\n+typedef ... BN_MONT_CTX;\n typedef ... BIGNUM;\n typedef int... BN_ULONG;\n \"\"\"\n \n FUNCTIONS = \"\"\"\n+#define BN_FLG_CONSTTIME ...\n+\n+void BN_set_flags(BIGNUM *, int);\n+int BN_get_flags(const BIGNUM *, int);\n+\n BIGNUM *BN_new(void);\n void BN_free(BIGNUM *);\n void BN_clear_free(BIGNUM *);\n@@ -29,6 +35,10 @@\n BIGNUM *BN_CTX_get(BN_CTX *);\n void BN_CTX_end(BN_CTX *);\n \n+BN_MONT_CTX *BN_MONT_CTX_new(void);\n+int BN_MONT_CTX_set(BN_MONT_CTX *, BIGNUM *, BN_CTX *);\n+void BN_MONT_CTX_free(BN_MONT_CTX *);\n+\n BIGNUM *BN_copy(BIGNUM *, const BIGNUM *);\n BIGNUM *BN_dup(const BIGNUM *);\n \n@@ -63,6 +73,10 @@\n int BN_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n int BN_mod_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\n+int BN_mod_exp_mont(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n+ BN_CTX *, BN_MONT_CTX *);\n+int BN_mod_exp_mont_consttime(BIGNUM *, const BIGNUM *, const BIGNUM *,\n+ const BIGNUM *, BN_CTX *, BN_MONT_CTX *);\n int BN_gcd(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n BIGNUM *BN_mod_inverse(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n", "issue": "Some OpenSSL bignum arithmetic operations are not in constant time\n### What's wrong:\r\n1. Some arithmetic operations exposed in the [OpenSSL bignum binding](https://github.com/pyca/cryptography/blob/master/src/_cffi_src/openssl/bignum.py) are not in constant time.\r\n2. These functions are specifically:\r\n 1. `BN_div`\r\n 2. `BN_mod_inverse`\r\n 3. `BN_mod_exp`\r\n\r\n### How to resolve:\r\n1. The OpenSSL solution to this is to call `BN_set_flags` on the secret BIGNUM and set the `BN_FLG_CONSTTIME` flag. [The OpenSSL docs reference this here.](https://github.com/openssl/openssl/blob/master/include/openssl/bn.h#L61)\r\n2. Expose the needed operations:\r\n 1. `BN_MONT_CTX` operations.\r\n 2. `BN_mod_exp_mont`, and `BN_mod_exp_mont_consttime` \r\n 3. `BN_set_flags`, `BN_get_flags`, and the `BN_FLG_CONSTTIME` flag.\r\n\r\n### Notes:\r\n1. Cryptography.io isn't affected by these timing issues. I have clarified with @alex and @reaperhulk.\r\n2. If you are a downstream user utilizing these operations on private/secret values, then _you are affected_.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <openssl/bn.h>\n\"\"\"\n\nTYPES = \"\"\"\ntypedef ... BN_CTX;\ntypedef ... BIGNUM;\ntypedef int... BN_ULONG;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nBIGNUM *BN_new(void);\nvoid BN_free(BIGNUM *);\nvoid BN_clear_free(BIGNUM *);\n\nint BN_rand(BIGNUM *, int, int, int);\nint BN_rand_range(BIGNUM *, BIGNUM *);\n\nBN_CTX *BN_CTX_new(void);\nvoid BN_CTX_free(BN_CTX *);\n\nvoid BN_CTX_start(BN_CTX *);\nBIGNUM *BN_CTX_get(BN_CTX *);\nvoid BN_CTX_end(BN_CTX *);\n\nBIGNUM *BN_copy(BIGNUM *, const BIGNUM *);\nBIGNUM *BN_dup(const BIGNUM *);\n\nint BN_set_word(BIGNUM *, BN_ULONG);\nBN_ULONG BN_get_word(const BIGNUM *);\n\nconst BIGNUM *BN_value_one(void);\n\nchar *BN_bn2hex(const BIGNUM *);\nint BN_hex2bn(BIGNUM **, const char *);\nint BN_dec2bn(BIGNUM **, const char *);\n\nint BN_bn2bin(const BIGNUM *, unsigned char *);\nBIGNUM *BN_bin2bn(const unsigned char *, int, BIGNUM *);\n\nint BN_num_bits(const BIGNUM *);\n\nint BN_cmp(const BIGNUM *, const BIGNUM *);\nint BN_add(BIGNUM *, const BIGNUM *, const BIGNUM *);\nint BN_sub(BIGNUM *, const BIGNUM *, const BIGNUM *);\nint BN_mul(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_sqr(BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_div(BIGNUM *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_nnmod(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_mod_add(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\nint BN_mod_sub(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\nint BN_mod_mul(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\nint BN_mod_sqr(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_mod_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\nint BN_gcd(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nBIGNUM *BN_mod_inverse(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n\nint BN_set_bit(BIGNUM *, int);\nint BN_clear_bit(BIGNUM *, int);\n\nint BN_is_bit_set(const BIGNUM *, int);\n\nint BN_mask_bits(BIGNUM *, int);\n\nint BN_num_bytes(const BIGNUM *);\n\nint BN_zero(BIGNUM *);\nint BN_one(BIGNUM *);\nint BN_mod(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n\nint BN_lshift(BIGNUM *, const BIGNUM *, int);\nint BN_lshift1(BIGNUM *, BIGNUM *);\n\nint BN_rshift(BIGNUM *, BIGNUM *, int);\nint BN_rshift1(BIGNUM *, BIGNUM *);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n", "path": "src/_cffi_src/openssl/bignum.py"}]}
1,813
444
gh_patches_debug_19759
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-761
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Provide default gpu resource name and validation Currently users can only pass "gpu" as part of the resource name. However, k8s requires it to be either `"nvidia.com/gpu"` or `"amd.com/gpu"` if AMD plugin is enabled. There are other different vendors as well but a pattern to use for validation would be `"<vendor>.com/gpu"`. We should consider adding `"nvidia.com/gpu"` as the default and validate for user provided gpu resource name based on the pattern `"<vendor>.com/gpu"`. Reference: https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/ </issue> <code> [start of elasticdl/python/elasticdl/common/k8s_utils.py] 1 import re 2 3 4 _ALLOWED_RESOURCE_TYPES = ["memory", "disk", "ephemeral-storage", "cpu", "gpu"] 5 6 7 def _is_numeric(n): 8 try: 9 float(n) 10 except ValueError: 11 return False 12 return True 13 14 15 def _valid_gpu_spec(gpu_str): 16 if not gpu_str.isnumeric(): 17 raise ValueError("invalid gpu request spec: " + gpu_str) 18 return gpu_str 19 20 21 def _valid_cpu_spec(cpu_str): 22 regexp = re.compile("([1-9]{1})([0-9]*)m$") 23 if not regexp.match(cpu_str) and not _is_numeric(cpu_str): 24 raise ValueError("invalid cpu request spec: " + cpu_str) 25 return cpu_str 26 27 28 def _valid_mem_spec(mem_str): 29 regexp = re.compile("([1-9]{1})([0-9]*)(E|P|T|G|M|K|Ei|Pi|Ti|Gi|Mi|Ki)$") 30 if not regexp.match(mem_str): 31 raise ValueError("invalid memory request spec: " + mem_str) 32 return mem_str 33 34 35 def parse_resource(resource_str): 36 """Parse combined k8s resource string into a dict. 37 38 Args: 39 resource_str: The string representation for k8s resource, 40 e.g. "cpu=250m,memory=32Mi,disk=64Mi,gpu=1,ephemeral-storage=32Mi". 41 42 Return: 43 A Python dictionary parsed from the given resource string. 44 """ 45 kvs = resource_str.split(",") 46 resource_names = [] 47 parsed_res_dict = {} 48 for kv in kvs: 49 k, v = kv.split("=") 50 if k not in resource_names: 51 resource_names.append(k) 52 else: 53 raise ValueError( 54 "The resource string contains duplicate resource names: %s" % k 55 ) 56 if k in ["memory", "disk", "ephemeral-storage"]: 57 _valid_mem_spec(v) 58 elif k == "cpu": 59 _valid_cpu_spec(v) 60 elif k == "gpu": 61 _valid_gpu_spec(v) 62 else: 63 raise ValueError( 64 "%s is not in the allowed list of resource types: %s" 65 % (k, _ALLOWED_RESOURCE_TYPES) 66 ) 67 parsed_res_dict[k] = v 68 return parsed_res_dict 69 [end of elasticdl/python/elasticdl/common/k8s_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticdl/python/elasticdl/common/k8s_utils.py b/elasticdl/python/elasticdl/common/k8s_utils.py --- a/elasticdl/python/elasticdl/common/k8s_utils.py +++ b/elasticdl/python/elasticdl/common/k8s_utils.py @@ -2,6 +2,10 @@ _ALLOWED_RESOURCE_TYPES = ["memory", "disk", "ephemeral-storage", "cpu", "gpu"] +# Any domain name is (syntactically) valid if it's a dot-separated list of +# identifiers, each no longer than 63 characters, and made up of letters, +# digits and dashes (no underscores). +_GPU_VENDOR_REGEX_STR = r"^[a-zA-Z\d-]{,63}(\.[a-zA-Z\d-]{,63})*/gpu$" def _is_numeric(n): @@ -57,7 +61,14 @@ _valid_mem_spec(v) elif k == "cpu": _valid_cpu_spec(v) - elif k == "gpu": + elif "gpu" in k: + if k == "gpu": + k = "nvidia.com/gpu" + elif not re.compile(_GPU_VENDOR_REGEX_STR).match(k): + raise ValueError( + "gpu resource name does not have a valid vendor name: %s" + % k + ) _valid_gpu_spec(v) else: raise ValueError(
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/common/k8s_utils.py b/elasticdl/python/elasticdl/common/k8s_utils.py\n--- a/elasticdl/python/elasticdl/common/k8s_utils.py\n+++ b/elasticdl/python/elasticdl/common/k8s_utils.py\n@@ -2,6 +2,10 @@\n \n \n _ALLOWED_RESOURCE_TYPES = [\"memory\", \"disk\", \"ephemeral-storage\", \"cpu\", \"gpu\"]\n+# Any domain name is (syntactically) valid if it's a dot-separated list of\n+# identifiers, each no longer than 63 characters, and made up of letters,\n+# digits and dashes (no underscores).\n+_GPU_VENDOR_REGEX_STR = r\"^[a-zA-Z\\d-]{,63}(\\.[a-zA-Z\\d-]{,63})*/gpu$\"\n \n \n def _is_numeric(n):\n@@ -57,7 +61,14 @@\n _valid_mem_spec(v)\n elif k == \"cpu\":\n _valid_cpu_spec(v)\n- elif k == \"gpu\":\n+ elif \"gpu\" in k:\n+ if k == \"gpu\":\n+ k = \"nvidia.com/gpu\"\n+ elif not re.compile(_GPU_VENDOR_REGEX_STR).match(k):\n+ raise ValueError(\n+ \"gpu resource name does not have a valid vendor name: %s\"\n+ % k\n+ )\n _valid_gpu_spec(v)\n else:\n raise ValueError(\n", "issue": "Provide default gpu resource name and validation\nCurrently users can only pass \"gpu\" as part of the resource name. However, k8s requires it to be either `\"nvidia.com/gpu\"` or `\"amd.com/gpu\"` if AMD plugin is enabled. There are other different vendors as well but a pattern to use for validation would be `\"<vendor>.com/gpu\"`.\r\n\r\nWe should consider adding `\"nvidia.com/gpu\"` as the default and validate for user provided gpu resource name based on the pattern `\"<vendor>.com/gpu\"`.\r\n\r\nReference: https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/\n", "before_files": [{"content": "import re\n\n\n_ALLOWED_RESOURCE_TYPES = [\"memory\", \"disk\", \"ephemeral-storage\", \"cpu\", \"gpu\"]\n\n\ndef _is_numeric(n):\n try:\n float(n)\n except ValueError:\n return False\n return True\n\n\ndef _valid_gpu_spec(gpu_str):\n if not gpu_str.isnumeric():\n raise ValueError(\"invalid gpu request spec: \" + gpu_str)\n return gpu_str\n\n\ndef _valid_cpu_spec(cpu_str):\n regexp = re.compile(\"([1-9]{1})([0-9]*)m$\")\n if not regexp.match(cpu_str) and not _is_numeric(cpu_str):\n raise ValueError(\"invalid cpu request spec: \" + cpu_str)\n return cpu_str\n\n\ndef _valid_mem_spec(mem_str):\n regexp = re.compile(\"([1-9]{1})([0-9]*)(E|P|T|G|M|K|Ei|Pi|Ti|Gi|Mi|Ki)$\")\n if not regexp.match(mem_str):\n raise ValueError(\"invalid memory request spec: \" + mem_str)\n return mem_str\n\n\ndef parse_resource(resource_str):\n \"\"\"Parse combined k8s resource string into a dict.\n\n Args:\n resource_str: The string representation for k8s resource,\n e.g. \"cpu=250m,memory=32Mi,disk=64Mi,gpu=1,ephemeral-storage=32Mi\".\n\n Return:\n A Python dictionary parsed from the given resource string.\n \"\"\"\n kvs = resource_str.split(\",\")\n resource_names = []\n parsed_res_dict = {}\n for kv in kvs:\n k, v = kv.split(\"=\")\n if k not in resource_names:\n resource_names.append(k)\n else:\n raise ValueError(\n \"The resource string contains duplicate resource names: %s\" % k\n )\n if k in [\"memory\", \"disk\", \"ephemeral-storage\"]:\n _valid_mem_spec(v)\n elif k == \"cpu\":\n _valid_cpu_spec(v)\n elif k == \"gpu\":\n _valid_gpu_spec(v)\n else:\n raise ValueError(\n \"%s is not in the allowed list of resource types: %s\"\n % (k, _ALLOWED_RESOURCE_TYPES)\n )\n parsed_res_dict[k] = v\n return parsed_res_dict\n", "path": "elasticdl/python/elasticdl/common/k8s_utils.py"}]}
1,317
319
gh_patches_debug_8552
rasdani/github-patches
git_diff
Gallopsled__pwntools-323
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Importing shellcraft submodules doesn't work ``` $ python >>> import pwnlib.shellcraft.arm Traceback (most recent call last): File "<stdin>", line 1, in <module> ImportError: No module named arm ``` </issue> <code> [start of pwnlib/shellcraft/__init__.py] 1 from types import ModuleType 2 import sys, os, re 3 from . import internal 4 from ..context import context 5 6 class module(ModuleType): 7 def __init__(self, name, directory): 8 super(module, self).__init__(name) 9 10 # Insert nice properties 11 self.__dict__.update({ 12 '__file__': __file__, 13 '__package__': __package__, 14 '__path__': __path__, 15 }) 16 17 # Save the shellcode directory 18 self._dir = directory 19 20 # Find the absolute path of the directory 21 self._absdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', self._dir) 22 23 # Get the docstring 24 with open(os.path.join(self._absdir, "__doc__")) as fd: 25 self.__doc__ = fd.read() 26 27 # Insert into the module list 28 sys.modules[self.__name__] = self 29 30 def __lazyinit__(self): 31 32 # Create a dictionary of submodules 33 self._submodules = {} 34 self._shellcodes = {} 35 for name in os.listdir(self._absdir): 36 path = os.path.join(self._absdir, name) 37 if os.path.isdir(path): 38 self._submodules[name] = module(self.__name__ + '.' + name, os.path.join(self._dir, name)) 39 elif os.path.isfile(path) and name != '__doc__' and name[0] != '.': 40 funcname, _ext = os.path.splitext(name) 41 if not re.match('^[a-zA-Z][a-zA-Z0-9_]*$', funcname): 42 raise ValueError("found illegal filename, %r" % name) 43 self._shellcodes[funcname] = name 44 45 # Put the submodules into toplevel 46 self.__dict__.update(self._submodules) 47 48 # These are exported 49 self.__all__ = sorted(self._shellcodes.keys() + self._submodules.keys()) 50 51 # Make sure this is not called again 52 self.__lazyinit__ = None 53 54 def __getattr__(self, key): 55 self.__lazyinit__ and self.__lazyinit__() 56 57 # Maybe the lazyinit added it 58 if key in self.__dict__: 59 return self.__dict__[key] 60 61 # This function lazy-loads the shellcodes 62 if key in self._shellcodes: 63 real = internal.make_function(key, self._shellcodes[key], self._dir) 64 setattr(self, key, real) 65 return real 66 67 for m in self._context_modules(): 68 try: 69 return getattr(m, key) 70 except AttributeError: 71 pass 72 73 raise AttributeError("'module' object has no attribute '%s'" % key) 74 75 def __dir__(self): 76 # This function lists the available submodules, available shellcodes 77 # and potentially shellcodes available in submodules that should be 78 # avilable because of the context 79 self.__lazyinit__ and self.__lazyinit__() 80 81 result = list(self._submodules.keys()) 82 result.extend(('__file__', '__package__', '__path__', 83 '__all__', '__name__')) 84 result.extend(self.__shellcodes__()) 85 86 return result 87 88 def _context_modules(self): 89 self.__lazyinit__ and self.__lazyinit__() 90 for k, m in self._submodules.items(): 91 if k in [context.arch, context.os]: 92 yield m 93 94 def __shellcodes__(self): 95 self.__lazyinit__ and self.__lazyinit__() 96 result = self._shellcodes.keys() 97 for m in self._context_modules(): 98 result.extend(m.__shellcodes__()) 99 return result 100 101 # To prevent garbage collection 102 tether = sys.modules[__name__] 103 104 # Create the module structure 105 module(__name__, '') 106 [end of pwnlib/shellcraft/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwnlib/shellcraft/__init__.py b/pwnlib/shellcraft/__init__.py --- a/pwnlib/shellcraft/__init__.py +++ b/pwnlib/shellcraft/__init__.py @@ -102,4 +102,22 @@ tether = sys.modules[__name__] # Create the module structure -module(__name__, '') +shellcraft = module(__name__, '') + +class LazyImporter: + def find_module(self, fullname, path): + if not fullname.startswith('pwnlib.shellcraft.'): + return None + + parts = fullname.split('.')[2:] + cur = shellcraft + for part in parts: + cur = getattr(cur, part, None) + if not isinstance(cur, ModuleType): + return None + + return self + + def load_module(self, fullname): + return sys.modules[fullname] +sys.meta_path.append(LazyImporter())
{"golden_diff": "diff --git a/pwnlib/shellcraft/__init__.py b/pwnlib/shellcraft/__init__.py\n--- a/pwnlib/shellcraft/__init__.py\n+++ b/pwnlib/shellcraft/__init__.py\n@@ -102,4 +102,22 @@\n tether = sys.modules[__name__]\n \n # Create the module structure\n-module(__name__, '')\n+shellcraft = module(__name__, '')\n+\n+class LazyImporter:\n+ def find_module(self, fullname, path):\n+ if not fullname.startswith('pwnlib.shellcraft.'):\n+ return None\n+\n+ parts = fullname.split('.')[2:]\n+ cur = shellcraft\n+ for part in parts:\n+ cur = getattr(cur, part, None)\n+ if not isinstance(cur, ModuleType):\n+ return None\n+\n+ return self\n+\n+ def load_module(self, fullname):\n+ return sys.modules[fullname]\n+sys.meta_path.append(LazyImporter())\n", "issue": "Importing shellcraft submodules doesn't work\n```\n$ python\n>>> import pwnlib.shellcraft.arm\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nImportError: No module named arm\n```\n\n", "before_files": [{"content": "from types import ModuleType\nimport sys, os, re\nfrom . import internal\nfrom ..context import context\n\nclass module(ModuleType):\n def __init__(self, name, directory):\n super(module, self).__init__(name)\n\n # Insert nice properties\n self.__dict__.update({\n '__file__': __file__,\n '__package__': __package__,\n '__path__': __path__,\n })\n\n # Save the shellcode directory\n self._dir = directory\n\n # Find the absolute path of the directory\n self._absdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', self._dir)\n\n # Get the docstring\n with open(os.path.join(self._absdir, \"__doc__\")) as fd:\n self.__doc__ = fd.read()\n\n # Insert into the module list\n sys.modules[self.__name__] = self\n\n def __lazyinit__(self):\n\n # Create a dictionary of submodules\n self._submodules = {}\n self._shellcodes = {}\n for name in os.listdir(self._absdir):\n path = os.path.join(self._absdir, name)\n if os.path.isdir(path):\n self._submodules[name] = module(self.__name__ + '.' + name, os.path.join(self._dir, name))\n elif os.path.isfile(path) and name != '__doc__' and name[0] != '.':\n funcname, _ext = os.path.splitext(name)\n if not re.match('^[a-zA-Z][a-zA-Z0-9_]*$', funcname):\n raise ValueError(\"found illegal filename, %r\" % name)\n self._shellcodes[funcname] = name\n\n # Put the submodules into toplevel\n self.__dict__.update(self._submodules)\n\n # These are exported\n self.__all__ = sorted(self._shellcodes.keys() + self._submodules.keys())\n\n # Make sure this is not called again\n self.__lazyinit__ = None\n\n def __getattr__(self, key):\n self.__lazyinit__ and self.__lazyinit__()\n\n # Maybe the lazyinit added it\n if key in self.__dict__:\n return self.__dict__[key]\n\n # This function lazy-loads the shellcodes\n if key in self._shellcodes:\n real = internal.make_function(key, self._shellcodes[key], self._dir)\n setattr(self, key, real)\n return real\n\n for m in self._context_modules():\n try:\n return getattr(m, key)\n except AttributeError:\n pass\n\n raise AttributeError(\"'module' object has no attribute '%s'\" % key)\n\n def __dir__(self):\n # This function lists the available submodules, available shellcodes\n # and potentially shellcodes available in submodules that should be\n # avilable because of the context\n self.__lazyinit__ and self.__lazyinit__()\n\n result = list(self._submodules.keys())\n result.extend(('__file__', '__package__', '__path__',\n '__all__', '__name__'))\n result.extend(self.__shellcodes__())\n\n return result\n\n def _context_modules(self):\n self.__lazyinit__ and self.__lazyinit__()\n for k, m in self._submodules.items():\n if k in [context.arch, context.os]:\n yield m\n\n def __shellcodes__(self):\n self.__lazyinit__ and self.__lazyinit__()\n result = self._shellcodes.keys()\n for m in self._context_modules():\n result.extend(m.__shellcodes__())\n return result\n\n# To prevent garbage collection\ntether = sys.modules[__name__]\n\n# Create the module structure\nmodule(__name__, '')\n", "path": "pwnlib/shellcraft/__init__.py"}]}
1,631
216
gh_patches_debug_26773
rasdani/github-patches
git_diff
optuna__optuna-4940
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix `Test` warnings from `BoTorch` ### Motivation Resolve following warnings from test: ``` tests/terminator_tests/improvement_tests/gp_tests/test_botorch.py: 4 warnings tests/visualization_tests/test_terminator_improvement.py: 46 warnings /opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/botorch/fit.py:139: DeprecationWarning: `fit_gpytorch_model` is marked for deprecation, consider using `fit_gpytorch_mll` instead. ``` ### Suggestion Look for the proper way to fix this warning (Hopefully, just replacing `fit_gpytorch_model` with `fit_gpytorch_mll`). ### Additional context (optional) _No response_ </issue> <code> [start of optuna/terminator/improvement/gp/botorch.py] 1 from __future__ import annotations 2 3 from typing import Optional 4 5 import numpy as np 6 7 from optuna._imports import try_import 8 from optuna.distributions import _is_distribution_log 9 from optuna.distributions import CategoricalDistribution 10 from optuna.distributions import FloatDistribution 11 from optuna.distributions import IntDistribution 12 from optuna.search_space import intersection_search_space 13 from optuna.terminator.improvement.gp.base import BaseGaussianProcess 14 from optuna.trial._frozen import FrozenTrial 15 from optuna.trial._state import TrialState 16 17 18 with try_import() as _imports: 19 from botorch.fit import fit_gpytorch_model 20 from botorch.models import SingleTaskGP 21 from botorch.models.transforms import Normalize 22 from botorch.models.transforms import Standardize 23 import gpytorch 24 import torch 25 26 __all__ = [ 27 "fit_gpytorch_model", 28 "SingleTaskGP", 29 "Normalize", 30 "Standardize", 31 "gpytorch", 32 "torch", 33 ] 34 35 36 class _BoTorchGaussianProcess(BaseGaussianProcess): 37 def __init__(self) -> None: 38 _imports.check() 39 40 self._gp: Optional[SingleTaskGP] = None 41 42 def fit( 43 self, 44 trials: list[FrozenTrial], 45 ) -> None: 46 self._trials = trials 47 48 x, bounds = _convert_trials_to_tensors(trials) 49 50 n_params = x.shape[1] 51 52 y = torch.tensor([trial.value for trial in trials], dtype=torch.float64) 53 y = torch.unsqueeze(y, 1) 54 55 self._gp = SingleTaskGP( 56 x, 57 y, 58 input_transform=Normalize(d=n_params, bounds=bounds), 59 outcome_transform=Standardize(m=1), 60 ) 61 62 mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gp.likelihood, self._gp) 63 64 fit_gpytorch_model(mll) 65 66 def predict_mean_std( 67 self, 68 trials: list[FrozenTrial], 69 ) -> tuple[np.ndarray, np.ndarray]: 70 assert self._gp is not None 71 72 x, _ = _convert_trials_to_tensors(trials) 73 74 with torch.no_grad(), gpytorch.settings.fast_pred_var(): 75 posterior = self._gp.posterior(x) 76 mean = posterior.mean 77 variance = posterior.variance 78 std = variance.sqrt() 79 80 return mean.detach().numpy(), std.detach().numpy() 81 82 83 def _convert_trials_to_tensors(trials: list[FrozenTrial]) -> tuple[torch.Tensor, torch.Tensor]: 84 """Convert a list of FrozenTrial objects to tensors inputs and bounds. 85 86 This function assumes the following condition for input trials: 87 - any categorical param is converted to a float or int one; 88 - log is unscaled for any float/int distribution; 89 - the state is COMPLETE for any trial; 90 - direction is MINIMIZE for any trial. 91 """ 92 search_space = intersection_search_space(trials) 93 sorted_params = sorted(search_space.keys()) 94 95 x = [] 96 for trial in trials: 97 assert trial.state == TrialState.COMPLETE 98 x_row = [] 99 for param in sorted_params: 100 distribution = search_space[param] 101 102 assert not _is_distribution_log(distribution) 103 assert not isinstance(distribution, CategoricalDistribution) 104 105 param_value = float(trial.params[param]) 106 x_row.append(param_value) 107 108 x.append(x_row) 109 110 min_bounds = [] 111 max_bounds = [] 112 for param, distribution in search_space.items(): 113 assert isinstance(distribution, (FloatDistribution, IntDistribution)) 114 min_bounds.append(distribution.low) 115 max_bounds.append(distribution.high) 116 bounds = [min_bounds, max_bounds] 117 118 return torch.tensor(x, dtype=torch.float64), torch.tensor(bounds, dtype=torch.float64) 119 [end of optuna/terminator/improvement/gp/botorch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/optuna/terminator/improvement/gp/botorch.py b/optuna/terminator/improvement/gp/botorch.py --- a/optuna/terminator/improvement/gp/botorch.py +++ b/optuna/terminator/improvement/gp/botorch.py @@ -3,6 +3,7 @@ from typing import Optional import numpy as np +from packaging import version from optuna._imports import try_import from optuna.distributions import _is_distribution_log @@ -16,15 +17,20 @@ with try_import() as _imports: - from botorch.fit import fit_gpytorch_model + import botorch from botorch.models import SingleTaskGP from botorch.models.transforms import Normalize from botorch.models.transforms import Standardize import gpytorch import torch + if version.parse(botorch.version.version) < version.parse("0.8.0"): + from botorch.fit import fit_gpytorch_model as fit_gpytorch_mll + else: + from botorch.fit import fit_gpytorch_mll + __all__ = [ - "fit_gpytorch_model", + "fit_gpytorch_mll", "SingleTaskGP", "Normalize", "Standardize", @@ -61,7 +67,7 @@ mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gp.likelihood, self._gp) - fit_gpytorch_model(mll) + fit_gpytorch_mll(mll) def predict_mean_std( self,
{"golden_diff": "diff --git a/optuna/terminator/improvement/gp/botorch.py b/optuna/terminator/improvement/gp/botorch.py\n--- a/optuna/terminator/improvement/gp/botorch.py\n+++ b/optuna/terminator/improvement/gp/botorch.py\n@@ -3,6 +3,7 @@\n from typing import Optional\n \n import numpy as np\n+from packaging import version\n \n from optuna._imports import try_import\n from optuna.distributions import _is_distribution_log\n@@ -16,15 +17,20 @@\n \n \n with try_import() as _imports:\n- from botorch.fit import fit_gpytorch_model\n+ import botorch\n from botorch.models import SingleTaskGP\n from botorch.models.transforms import Normalize\n from botorch.models.transforms import Standardize\n import gpytorch\n import torch\n \n+ if version.parse(botorch.version.version) < version.parse(\"0.8.0\"):\n+ from botorch.fit import fit_gpytorch_model as fit_gpytorch_mll\n+ else:\n+ from botorch.fit import fit_gpytorch_mll\n+\n __all__ = [\n- \"fit_gpytorch_model\",\n+ \"fit_gpytorch_mll\",\n \"SingleTaskGP\",\n \"Normalize\",\n \"Standardize\",\n@@ -61,7 +67,7 @@\n \n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gp.likelihood, self._gp)\n \n- fit_gpytorch_model(mll)\n+ fit_gpytorch_mll(mll)\n \n def predict_mean_std(\n self,\n", "issue": "Fix `Test` warnings from `BoTorch`\n### Motivation\n\nResolve following warnings from test:\r\n```\r\ntests/terminator_tests/improvement_tests/gp_tests/test_botorch.py: 4 warnings\r\ntests/visualization_tests/test_terminator_improvement.py: 46 warnings\r\n /opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/botorch/fit.py:139: DeprecationWarning:\r\n \r\n `fit_gpytorch_model` is marked for deprecation, consider using `fit_gpytorch_mll` instead.\r\n```\n\n### Suggestion\n\nLook for the proper way to fix this warning (Hopefully, just replacing `fit_gpytorch_model` with `fit_gpytorch_mll`).\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Optional\n\nimport numpy as np\n\nfrom optuna._imports import try_import\nfrom optuna.distributions import _is_distribution_log\nfrom optuna.distributions import CategoricalDistribution\nfrom optuna.distributions import FloatDistribution\nfrom optuna.distributions import IntDistribution\nfrom optuna.search_space import intersection_search_space\nfrom optuna.terminator.improvement.gp.base import BaseGaussianProcess\nfrom optuna.trial._frozen import FrozenTrial\nfrom optuna.trial._state import TrialState\n\n\nwith try_import() as _imports:\n from botorch.fit import fit_gpytorch_model\n from botorch.models import SingleTaskGP\n from botorch.models.transforms import Normalize\n from botorch.models.transforms import Standardize\n import gpytorch\n import torch\n\n__all__ = [\n \"fit_gpytorch_model\",\n \"SingleTaskGP\",\n \"Normalize\",\n \"Standardize\",\n \"gpytorch\",\n \"torch\",\n]\n\n\nclass _BoTorchGaussianProcess(BaseGaussianProcess):\n def __init__(self) -> None:\n _imports.check()\n\n self._gp: Optional[SingleTaskGP] = None\n\n def fit(\n self,\n trials: list[FrozenTrial],\n ) -> None:\n self._trials = trials\n\n x, bounds = _convert_trials_to_tensors(trials)\n\n n_params = x.shape[1]\n\n y = torch.tensor([trial.value for trial in trials], dtype=torch.float64)\n y = torch.unsqueeze(y, 1)\n\n self._gp = SingleTaskGP(\n x,\n y,\n input_transform=Normalize(d=n_params, bounds=bounds),\n outcome_transform=Standardize(m=1),\n )\n\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gp.likelihood, self._gp)\n\n fit_gpytorch_model(mll)\n\n def predict_mean_std(\n self,\n trials: list[FrozenTrial],\n ) -> tuple[np.ndarray, np.ndarray]:\n assert self._gp is not None\n\n x, _ = _convert_trials_to_tensors(trials)\n\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n posterior = self._gp.posterior(x)\n mean = posterior.mean\n variance = posterior.variance\n std = variance.sqrt()\n\n return mean.detach().numpy(), std.detach().numpy()\n\n\ndef _convert_trials_to_tensors(trials: list[FrozenTrial]) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Convert a list of FrozenTrial objects to tensors inputs and bounds.\n\n This function assumes the following condition for input trials:\n - any categorical param is converted to a float or int one;\n - log is unscaled for any float/int distribution;\n - the state is COMPLETE for any trial;\n - direction is MINIMIZE for any trial.\n \"\"\"\n search_space = intersection_search_space(trials)\n sorted_params = sorted(search_space.keys())\n\n x = []\n for trial in trials:\n assert trial.state == TrialState.COMPLETE\n x_row = []\n for param in sorted_params:\n distribution = search_space[param]\n\n assert not _is_distribution_log(distribution)\n assert not isinstance(distribution, CategoricalDistribution)\n\n param_value = float(trial.params[param])\n x_row.append(param_value)\n\n x.append(x_row)\n\n min_bounds = []\n max_bounds = []\n for param, distribution in search_space.items():\n assert isinstance(distribution, (FloatDistribution, IntDistribution))\n min_bounds.append(distribution.low)\n max_bounds.append(distribution.high)\n bounds = [min_bounds, max_bounds]\n\n return torch.tensor(x, dtype=torch.float64), torch.tensor(bounds, dtype=torch.float64)\n", "path": "optuna/terminator/improvement/gp/botorch.py"}]}
1,809
374
gh_patches_debug_8875
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1401
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add tests for SkillHttpClient see dotnet and javascript imp [enhancement] </issue> <code> [start of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 from logging import Logger 5 6 from botbuilder.core import InvokeResponse 7 from botbuilder.integration.aiohttp import BotFrameworkHttpClient 8 from botbuilder.core.skills import ( 9 ConversationIdFactoryBase, 10 SkillConversationIdFactoryOptions, 11 BotFrameworkSkill, 12 ) 13 from botbuilder.schema import Activity 14 from botframework.connector.auth import ( 15 AuthenticationConstants, 16 ChannelProvider, 17 GovernmentConstants, 18 SimpleCredentialProvider, 19 ) 20 21 22 class SkillHttpClient(BotFrameworkHttpClient): 23 def __init__( 24 self, 25 credential_provider: SimpleCredentialProvider, 26 skill_conversation_id_factory: ConversationIdFactoryBase, 27 channel_provider: ChannelProvider = None, 28 logger: Logger = None, 29 ): 30 if not skill_conversation_id_factory: 31 raise TypeError( 32 "SkillHttpClient(): skill_conversation_id_factory can't be None" 33 ) 34 35 super().__init__(credential_provider) 36 37 self._skill_conversation_id_factory = skill_conversation_id_factory 38 self._channel_provider = channel_provider 39 40 async def post_activity_to_skill( 41 self, 42 from_bot_id: str, 43 to_skill: BotFrameworkSkill, 44 service_url: str, 45 activity: Activity, 46 originating_audience: str = None, 47 ) -> InvokeResponse: 48 49 if originating_audience is None: 50 originating_audience = ( 51 GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE 52 if self._channel_provider is not None 53 and self._channel_provider.IsGovernment() 54 else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE 55 ) 56 57 options = SkillConversationIdFactoryOptions( 58 from_bot_oauth_scope=originating_audience, 59 from_bot_id=from_bot_id, 60 activity=activity, 61 bot_framework_skill=to_skill, 62 ) 63 64 skill_conversation_id = await self._skill_conversation_id_factory.create_skill_conversation_id( 65 options 66 ) 67 68 return await super().post_activity( 69 from_bot_id, 70 to_skill.app_id, 71 to_skill.skill_endpoint, 72 service_url, 73 skill_conversation_id, 74 activity, 75 ) 76 [end of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py --- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py +++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py @@ -50,7 +50,7 @@ originating_audience = ( GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE if self._channel_provider is not None - and self._channel_provider.IsGovernment() + and self._channel_provider.is_government() else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE )
{"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py\n@@ -50,7 +50,7 @@\n originating_audience = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n if self._channel_provider is not None\n- and self._channel_provider.IsGovernment()\n+ and self._channel_provider.is_government()\n else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n", "issue": "Add tests for SkillHttpClient\nsee dotnet and javascript imp\r\n\r\n[enhancement]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom logging import Logger\n\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.integration.aiohttp import BotFrameworkHttpClient\nfrom botbuilder.core.skills import (\n ConversationIdFactoryBase,\n SkillConversationIdFactoryOptions,\n BotFrameworkSkill,\n)\nfrom botbuilder.schema import Activity\nfrom botframework.connector.auth import (\n AuthenticationConstants,\n ChannelProvider,\n GovernmentConstants,\n SimpleCredentialProvider,\n)\n\n\nclass SkillHttpClient(BotFrameworkHttpClient):\n def __init__(\n self,\n credential_provider: SimpleCredentialProvider,\n skill_conversation_id_factory: ConversationIdFactoryBase,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not skill_conversation_id_factory:\n raise TypeError(\n \"SkillHttpClient(): skill_conversation_id_factory can't be None\"\n )\n\n super().__init__(credential_provider)\n\n self._skill_conversation_id_factory = skill_conversation_id_factory\n self._channel_provider = channel_provider\n\n async def post_activity_to_skill(\n self,\n from_bot_id: str,\n to_skill: BotFrameworkSkill,\n service_url: str,\n activity: Activity,\n originating_audience: str = None,\n ) -> InvokeResponse:\n\n if originating_audience is None:\n originating_audience = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n if self._channel_provider is not None\n and self._channel_provider.IsGovernment()\n else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n\n options = SkillConversationIdFactoryOptions(\n from_bot_oauth_scope=originating_audience,\n from_bot_id=from_bot_id,\n activity=activity,\n bot_framework_skill=to_skill,\n )\n\n skill_conversation_id = await self._skill_conversation_id_factory.create_skill_conversation_id(\n options\n )\n\n return await super().post_activity(\n from_bot_id,\n to_skill.app_id,\n to_skill.skill_endpoint,\n service_url,\n skill_conversation_id,\n activity,\n )\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py"}]}
1,190
190
gh_patches_debug_803
rasdani/github-patches
git_diff
googleapis__google-api-python-client-871
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: module 'googleapiclient' has no attribute '__version__' When importing new version of google-api-python-client `from apiclient import discovery` i'm getting the error `AttributeError: module 'googleapiclient' has no attribute '__version__'` https://github.com/googleapis/google-api-python-client/blob/84d45619d753cb04d957651886231034194058b6/apiclient/__init__.py#L22 i guess this happens since you have removed `__version__` var from `googleapiclient/__init__.py` https://github.com/googleapis/google-api-python-client/commit/f706cfd821ab7457e5db37abfc3619772657dd0e#diff-b926d296d4c856bcbf877809e4523562L15 can you please fix? @busunkim96 @mik-laj @crwilcox Traceback: ``` from apiclient import discovery File "/usr/local/lib/python3.7/site-packages/apiclient/__init__.py", line 22, in <module> __version__ = googleapiclient.__version__ AttributeError: module 'googleapiclient' has no attribute '__version__' </issue> <code> [start of apiclient/__init__.py] 1 """Retain apiclient as an alias for googleapiclient.""" 2 3 from six import iteritems 4 5 import googleapiclient 6 7 from googleapiclient import channel 8 from googleapiclient import discovery 9 from googleapiclient import errors 10 from googleapiclient import http 11 from googleapiclient import mimeparse 12 from googleapiclient import model 13 14 try: 15 from googleapiclient import sample_tools 16 except ImportError: 17 # Silently ignore, because the vast majority of consumers won't use it and 18 # it has deep dependence on oauth2client, an optional dependency. 19 sample_tools = None 20 from googleapiclient import schema 21 22 __version__ = googleapiclient.__version__ 23 24 _SUBMODULES = { 25 "channel": channel, 26 "discovery": discovery, 27 "errors": errors, 28 "http": http, 29 "mimeparse": mimeparse, 30 "model": model, 31 "sample_tools": sample_tools, 32 "schema": schema, 33 } 34 35 import sys 36 37 for module_name, module in iteritems(_SUBMODULES): 38 sys.modules["apiclient.%s" % module_name] = module 39 [end of apiclient/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apiclient/__init__.py b/apiclient/__init__.py --- a/apiclient/__init__.py +++ b/apiclient/__init__.py @@ -19,8 +19,6 @@ sample_tools = None from googleapiclient import schema -__version__ = googleapiclient.__version__ - _SUBMODULES = { "channel": channel, "discovery": discovery,
{"golden_diff": "diff --git a/apiclient/__init__.py b/apiclient/__init__.py\n--- a/apiclient/__init__.py\n+++ b/apiclient/__init__.py\n@@ -19,8 +19,6 @@\n sample_tools = None\n from googleapiclient import schema\n \n-__version__ = googleapiclient.__version__\n-\n _SUBMODULES = {\n \"channel\": channel,\n \"discovery\": discovery,\n", "issue": "AttributeError: module 'googleapiclient' has no attribute '__version__'\nWhen importing new version of google-api-python-client\r\n`from apiclient import discovery`\r\ni'm getting the error\r\n`AttributeError: module 'googleapiclient' has no attribute '__version__'`\r\n\r\nhttps://github.com/googleapis/google-api-python-client/blob/84d45619d753cb04d957651886231034194058b6/apiclient/__init__.py#L22\r\n\r\ni guess this happens since you have removed `__version__` var from `googleapiclient/__init__.py`\r\n\r\nhttps://github.com/googleapis/google-api-python-client/commit/f706cfd821ab7457e5db37abfc3619772657dd0e#diff-b926d296d4c856bcbf877809e4523562L15\r\n\r\ncan you please fix? @busunkim96 @mik-laj @crwilcox \r\n\r\nTraceback:\r\n``` from apiclient import discovery\r\n File \"/usr/local/lib/python3.7/site-packages/apiclient/__init__.py\", line 22, in <module>\r\n __version__ = googleapiclient.__version__\r\nAttributeError: module 'googleapiclient' has no attribute '__version__'\n", "before_files": [{"content": "\"\"\"Retain apiclient as an alias for googleapiclient.\"\"\"\n\nfrom six import iteritems\n\nimport googleapiclient\n\nfrom googleapiclient import channel\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nfrom googleapiclient import http\nfrom googleapiclient import mimeparse\nfrom googleapiclient import model\n\ntry:\n from googleapiclient import sample_tools\nexcept ImportError:\n # Silently ignore, because the vast majority of consumers won't use it and\n # it has deep dependence on oauth2client, an optional dependency.\n sample_tools = None\nfrom googleapiclient import schema\n\n__version__ = googleapiclient.__version__\n\n_SUBMODULES = {\n \"channel\": channel,\n \"discovery\": discovery,\n \"errors\": errors,\n \"http\": http,\n \"mimeparse\": mimeparse,\n \"model\": model,\n \"sample_tools\": sample_tools,\n \"schema\": schema,\n}\n\nimport sys\n\nfor module_name, module in iteritems(_SUBMODULES):\n sys.modules[\"apiclient.%s\" % module_name] = module\n", "path": "apiclient/__init__.py"}]}
1,183
99
gh_patches_debug_19691
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2967
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Invalid Aliases when using SSM dynamic references ### CloudFormation Lint Version 0.83.0 ### What operating system are you using? Mac ### Describe the bug When using a dynamic reference to resolve the Alias domain, cfn-lint fails indicating it's an invalid alias. Shouldn't the code check if this is a `REGEX_DYN_REF` in https://github.com/aws-cloudformation/cfn-lint/blob/main/src/cfnlint/rules/resources/cloudfront/Aliases.py and ignore if so? A workaround would be to use "!Sub" which apparently is ignored already (`FUNCTIONS`). Shouldn't we also ignore when `REGEX_DYN_REF`? ### Expected behavior E3013 shouldn't be informed, since there's no way to validate the dynamic-reference value from cfn-lint perspective (?) ### Reproduction template ``` CloudFront: Type: AWS::CloudFront::Distribution Properties: DistributionConfig: Enabled: true Aliases: - "{{resolve:ssm:/env/fqdns/certifier}}" DefaultRootObject: index.html ``` </issue> <code> [start of src/cfnlint/rules/resources/cloudfront/Aliases.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import regex as re 6 7 from cfnlint.helpers import FUNCTIONS 8 from cfnlint.rules import CloudFormationLintRule, RuleMatch 9 10 11 class Aliases(CloudFormationLintRule): 12 """Check if CloudFront Aliases are valid domain names""" 13 14 id = "E3013" 15 shortdesc = "CloudFront Aliases" 16 description = "CloudFront aliases should contain valid domain names" 17 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases" 18 tags = ["properties", "cloudfront"] 19 20 def match(self, cfn): 21 """Check cloudfront Resource Parameters""" 22 23 matches = [] 24 25 valid_domain = re.compile( 26 r"^(?:[a-z0-9\*](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$" 27 ) 28 29 results = cfn.get_resource_properties( 30 ["AWS::CloudFront::Distribution", "DistributionConfig"] 31 ) 32 for result in results: 33 aliases = result["Value"].get("Aliases") 34 if aliases: 35 for alias in aliases: 36 if isinstance(alias, str) and alias not in FUNCTIONS: 37 wildcard = alias.split(".") 38 if "*" in wildcard[1:]: 39 path = result["Path"] + ["Aliases"] 40 message = f'Invalid use of wildcards: {alias} at {"/".join(result["Path"])}' 41 matches.append(RuleMatch(path, message)) 42 if not re.match(valid_domain, alias): 43 path = result["Path"] + ["Aliases"] 44 message = f'Invalid alias found: {alias} at {"/".join(result["Path"])}' 45 matches.append(RuleMatch(path, message)) 46 47 return matches 48 [end of src/cfnlint/rules/resources/cloudfront/Aliases.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/cloudfront/Aliases.py b/src/cfnlint/rules/resources/cloudfront/Aliases.py --- a/src/cfnlint/rules/resources/cloudfront/Aliases.py +++ b/src/cfnlint/rules/resources/cloudfront/Aliases.py @@ -4,7 +4,7 @@ """ import regex as re -from cfnlint.helpers import FUNCTIONS +from cfnlint.helpers import FUNCTIONS, REGEX_DYN_REF from cfnlint.rules import CloudFormationLintRule, RuleMatch @@ -35,6 +35,8 @@ for alias in aliases: if isinstance(alias, str) and alias not in FUNCTIONS: wildcard = alias.split(".") + if re.match(REGEX_DYN_REF, alias): + continue if "*" in wildcard[1:]: path = result["Path"] + ["Aliases"] message = f'Invalid use of wildcards: {alias} at {"/".join(result["Path"])}'
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/cloudfront/Aliases.py b/src/cfnlint/rules/resources/cloudfront/Aliases.py\n--- a/src/cfnlint/rules/resources/cloudfront/Aliases.py\n+++ b/src/cfnlint/rules/resources/cloudfront/Aliases.py\n@@ -4,7 +4,7 @@\n \"\"\"\n import regex as re\n \n-from cfnlint.helpers import FUNCTIONS\n+from cfnlint.helpers import FUNCTIONS, REGEX_DYN_REF\n from cfnlint.rules import CloudFormationLintRule, RuleMatch\n \n \n@@ -35,6 +35,8 @@\n for alias in aliases:\n if isinstance(alias, str) and alias not in FUNCTIONS:\n wildcard = alias.split(\".\")\n+ if re.match(REGEX_DYN_REF, alias):\n+ continue\n if \"*\" in wildcard[1:]:\n path = result[\"Path\"] + [\"Aliases\"]\n message = f'Invalid use of wildcards: {alias} at {\"/\".join(result[\"Path\"])}'\n", "issue": "Invalid Aliases when using SSM dynamic references\n### CloudFormation Lint Version\r\n\r\n0.83.0\r\n\r\n### What operating system are you using?\r\n\r\nMac\r\n\r\n### Describe the bug\r\n\r\nWhen using a dynamic reference to resolve the Alias domain, cfn-lint fails indicating it's an invalid alias. Shouldn't the code check if this is a `REGEX_DYN_REF` in https://github.com/aws-cloudformation/cfn-lint/blob/main/src/cfnlint/rules/resources/cloudfront/Aliases.py and ignore if so?\r\n\r\nA workaround would be to use \"!Sub\" which apparently is ignored already (`FUNCTIONS`). Shouldn't we also ignore when `REGEX_DYN_REF`?\r\n\r\n### Expected behavior\r\n\r\nE3013 shouldn't be informed, since there's no way to validate the dynamic-reference value from cfn-lint perspective (?)\r\n\r\n### Reproduction template\r\n\r\n```\r\nCloudFront:\r\n Type: AWS::CloudFront::Distribution\r\n Properties:\r\n DistributionConfig:\r\n Enabled: true\r\n Aliases:\r\n - \"{{resolve:ssm:/env/fqdns/certifier}}\"\r\n DefaultRootObject: index.html\r\n``` \n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport regex as re\n\nfrom cfnlint.helpers import FUNCTIONS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Aliases(CloudFormationLintRule):\n \"\"\"Check if CloudFront Aliases are valid domain names\"\"\"\n\n id = \"E3013\"\n shortdesc = \"CloudFront Aliases\"\n description = \"CloudFront aliases should contain valid domain names\"\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases\"\n tags = [\"properties\", \"cloudfront\"]\n\n def match(self, cfn):\n \"\"\"Check cloudfront Resource Parameters\"\"\"\n\n matches = []\n\n valid_domain = re.compile(\n r\"^(?:[a-z0-9\\*](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$\"\n )\n\n results = cfn.get_resource_properties(\n [\"AWS::CloudFront::Distribution\", \"DistributionConfig\"]\n )\n for result in results:\n aliases = result[\"Value\"].get(\"Aliases\")\n if aliases:\n for alias in aliases:\n if isinstance(alias, str) and alias not in FUNCTIONS:\n wildcard = alias.split(\".\")\n if \"*\" in wildcard[1:]:\n path = result[\"Path\"] + [\"Aliases\"]\n message = f'Invalid use of wildcards: {alias} at {\"/\".join(result[\"Path\"])}'\n matches.append(RuleMatch(path, message))\n if not re.match(valid_domain, alias):\n path = result[\"Path\"] + [\"Aliases\"]\n message = f'Invalid alias found: {alias} at {\"/\".join(result[\"Path\"])}'\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/cloudfront/Aliases.py"}]}
1,317
214
gh_patches_debug_13562
rasdani/github-patches
git_diff
projectmesa__mesa-1984
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> JupyterViz space view limits **What's the problem this feature will solve?** At the moment, when drawing spaces with JupyterViz, the axes limits are left to be automatically determined by Matplotlib. Would it not make more sense (in particular of continuous spaces, to derive xlim and xmax from the space itself? **Describe the solution you'd like** The simplest fix would be to adapt `_draw_grid` to use `space.width` and `space.height` while `_draw_continuos_space` uses `space.x_min`, `space.x_max`, `space.y_min`, and `space.y_max`. Are there reasons where the user might want to rely on matpltolib's automatic determination of the bounds? If so, then a slightly more sophisticated solution would be needed where kwargs are passed to from jupyterviz to the underlying space_drawer function. So you would add a `space_drawer_kwargs` keyword argument. </issue> <code> [start of mesa/experimental/components/matplotlib.py] 1 from typing import Optional 2 3 import networkx as nx 4 import solara 5 from matplotlib.figure import Figure 6 from matplotlib.ticker import MaxNLocator 7 8 import mesa 9 10 11 @solara.component 12 def SpaceMatplotlib(model, agent_portrayal, dependencies: Optional[list[any]] = None): 13 space_fig = Figure() 14 space_ax = space_fig.subplots() 15 space = getattr(model, "grid", None) 16 if space is None: 17 # Sometimes the space is defined as model.space instead of model.grid 18 space = model.space 19 if isinstance(space, mesa.space.NetworkGrid): 20 _draw_network_grid(space, space_ax, agent_portrayal) 21 elif isinstance(space, mesa.space.ContinuousSpace): 22 _draw_continuous_space(space, space_ax, agent_portrayal) 23 else: 24 _draw_grid(space, space_ax, agent_portrayal) 25 space_ax.set_axis_off() 26 solara.FigureMatplotlib(space_fig, format="png", dependencies=dependencies) 27 28 29 def _draw_grid(space, space_ax, agent_portrayal): 30 def portray(g): 31 x = [] 32 y = [] 33 s = [] # size 34 c = [] # color 35 for i in range(g.width): 36 for j in range(g.height): 37 content = g._grid[i][j] 38 if not content: 39 continue 40 if not hasattr(content, "__iter__"): 41 # Is a single grid 42 content = [content] 43 for agent in content: 44 data = agent_portrayal(agent) 45 x.append(i) 46 y.append(j) 47 if "size" in data: 48 s.append(data["size"]) 49 if "color" in data: 50 c.append(data["color"]) 51 out = {"x": x, "y": y} 52 if len(s) > 0: 53 out["s"] = s 54 if len(c) > 0: 55 out["c"] = c 56 return out 57 58 space_ax.scatter(**portray(space)) 59 60 61 def _draw_network_grid(space, space_ax, agent_portrayal): 62 graph = space.G 63 pos = nx.spring_layout(graph, seed=0) 64 nx.draw( 65 graph, 66 ax=space_ax, 67 pos=pos, 68 **agent_portrayal(graph), 69 ) 70 71 72 def _draw_continuous_space(space, space_ax, agent_portrayal): 73 def portray(space): 74 x = [] 75 y = [] 76 s = [] # size 77 c = [] # color 78 for agent in space._agent_to_index: 79 data = agent_portrayal(agent) 80 _x, _y = agent.pos 81 x.append(_x) 82 y.append(_y) 83 if "size" in data: 84 s.append(data["size"]) 85 if "color" in data: 86 c.append(data["color"]) 87 out = {"x": x, "y": y} 88 if len(s) > 0: 89 out["s"] = s 90 if len(c) > 0: 91 out["c"] = c 92 return out 93 94 space_ax.scatter(**portray(space)) 95 96 97 def make_plot(model, measure): 98 fig = Figure() 99 ax = fig.subplots() 100 df = model.datacollector.get_model_vars_dataframe() 101 if isinstance(measure, str): 102 ax.plot(df.loc[:, measure]) 103 ax.set_ylabel(measure) 104 elif isinstance(measure, dict): 105 for m, color in measure.items(): 106 ax.plot(df.loc[:, m], label=m, color=color) 107 fig.legend() 108 elif isinstance(measure, (list, tuple)): 109 for m in measure: 110 ax.plot(df.loc[:, m], label=m) 111 fig.legend() 112 # Set integer x axis 113 ax.xaxis.set_major_locator(MaxNLocator(integer=True)) 114 solara.FigureMatplotlib(fig) 115 [end of mesa/experimental/components/matplotlib.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mesa/experimental/components/matplotlib.py b/mesa/experimental/components/matplotlib.py --- a/mesa/experimental/components/matplotlib.py +++ b/mesa/experimental/components/matplotlib.py @@ -55,6 +55,8 @@ out["c"] = c return out + space_ax.set_xlim(-1, space.width) + space_ax.set_ylim(-1, space.height) space_ax.scatter(**portray(space)) @@ -91,6 +93,14 @@ out["c"] = c return out + width = space.x_max - space.x_min + x_padding = width / 20 + height = space.y_max - space.y_min + y_padding = height / 20 + space_ax.set_xlim(space.x_min - x_padding, space.x_max + x_padding) + space_ax.set_ylim(space.y_min - y_padding, space.y_max + y_padding) + space_ax.scatter(**portray(space)) + space_ax.scatter(**portray(space))
{"golden_diff": "diff --git a/mesa/experimental/components/matplotlib.py b/mesa/experimental/components/matplotlib.py\n--- a/mesa/experimental/components/matplotlib.py\n+++ b/mesa/experimental/components/matplotlib.py\n@@ -55,6 +55,8 @@\n out[\"c\"] = c\n return out\n \n+ space_ax.set_xlim(-1, space.width)\n+ space_ax.set_ylim(-1, space.height)\n space_ax.scatter(**portray(space))\n \n \n@@ -91,6 +93,14 @@\n out[\"c\"] = c\n return out\n \n+ width = space.x_max - space.x_min\n+ x_padding = width / 20\n+ height = space.y_max - space.y_min\n+ y_padding = height / 20\n+ space_ax.set_xlim(space.x_min - x_padding, space.x_max + x_padding)\n+ space_ax.set_ylim(space.y_min - y_padding, space.y_max + y_padding)\n+ space_ax.scatter(**portray(space))\n+\n space_ax.scatter(**portray(space))\n", "issue": "JupyterViz space view limits\n**What's the problem this feature will solve?**\r\nAt the moment, when drawing spaces with JupyterViz, the axes limits are left to be automatically determined by Matplotlib. Would it not make more sense (in particular of continuous spaces, to derive xlim and xmax from the space itself?\r\n\r\n**Describe the solution you'd like**\r\nThe simplest fix would be to adapt `_draw_grid` to use `space.width` and `space.height` while `_draw_continuos_space` uses `space.x_min`, `space.x_max`, `space.y_min`, and `space.y_max`. Are there reasons where the user might want to rely on matpltolib's automatic determination of the bounds? If so, then a slightly more sophisticated solution would be needed where kwargs are passed to from jupyterviz to the underlying space_drawer function. So you would add a `space_drawer_kwargs` keyword argument.\r\n\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport networkx as nx\nimport solara\nfrom matplotlib.figure import Figure\nfrom matplotlib.ticker import MaxNLocator\n\nimport mesa\n\n\[email protected]\ndef SpaceMatplotlib(model, agent_portrayal, dependencies: Optional[list[any]] = None):\n space_fig = Figure()\n space_ax = space_fig.subplots()\n space = getattr(model, \"grid\", None)\n if space is None:\n # Sometimes the space is defined as model.space instead of model.grid\n space = model.space\n if isinstance(space, mesa.space.NetworkGrid):\n _draw_network_grid(space, space_ax, agent_portrayal)\n elif isinstance(space, mesa.space.ContinuousSpace):\n _draw_continuous_space(space, space_ax, agent_portrayal)\n else:\n _draw_grid(space, space_ax, agent_portrayal)\n space_ax.set_axis_off()\n solara.FigureMatplotlib(space_fig, format=\"png\", dependencies=dependencies)\n\n\ndef _draw_grid(space, space_ax, agent_portrayal):\n def portray(g):\n x = []\n y = []\n s = [] # size\n c = [] # color\n for i in range(g.width):\n for j in range(g.height):\n content = g._grid[i][j]\n if not content:\n continue\n if not hasattr(content, \"__iter__\"):\n # Is a single grid\n content = [content]\n for agent in content:\n data = agent_portrayal(agent)\n x.append(i)\n y.append(j)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_ax.scatter(**portray(space))\n\n\ndef _draw_network_grid(space, space_ax, agent_portrayal):\n graph = space.G\n pos = nx.spring_layout(graph, seed=0)\n nx.draw(\n graph,\n ax=space_ax,\n pos=pos,\n **agent_portrayal(graph),\n )\n\n\ndef _draw_continuous_space(space, space_ax, agent_portrayal):\n def portray(space):\n x = []\n y = []\n s = [] # size\n c = [] # color\n for agent in space._agent_to_index:\n data = agent_portrayal(agent)\n _x, _y = agent.pos\n x.append(_x)\n y.append(_y)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_ax.scatter(**portray(space))\n\n\ndef make_plot(model, measure):\n fig = Figure()\n ax = fig.subplots()\n df = model.datacollector.get_model_vars_dataframe()\n if isinstance(measure, str):\n ax.plot(df.loc[:, measure])\n ax.set_ylabel(measure)\n elif isinstance(measure, dict):\n for m, color in measure.items():\n ax.plot(df.loc[:, m], label=m, color=color)\n fig.legend()\n elif isinstance(measure, (list, tuple)):\n for m in measure:\n ax.plot(df.loc[:, m], label=m)\n fig.legend()\n # Set integer x axis\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n solara.FigureMatplotlib(fig)\n", "path": "mesa/experimental/components/matplotlib.py"}]}
1,785
234
gh_patches_debug_26616
rasdani/github-patches
git_diff
kivy__kivy-3859
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Button behavior has a property MIN_STATE_TIME This property can't be used in kv cause of the uppercase. </issue> <code> [start of kivy/uix/behaviors/button.py] 1 '''See :class:`ButtonBehavior` for details. 2 ''' 3 4 __all__ = ('ButtonBehavior', ) 5 6 from kivy.clock import Clock 7 from kivy.properties import OptionProperty, ObjectProperty, BooleanProperty 8 from time import time 9 10 11 class ButtonBehavior(object): 12 ''' 13 This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides 14 :class:`~kivy.uix.button.Button` behavior. 15 16 :Events: 17 `on_press` 18 Fired when the button is pressed. 19 `on_release` 20 Fired when the button is released (i.e. the touch/click that 21 pressed the button goes away). 22 ''' 23 24 state = OptionProperty('normal', options=('normal', 'down')) 25 '''The state of the button, must be one of 'normal' or 'down'. 26 The state is 'down' only when the button is currently touched/clicked, 27 otherwise its 'normal'. 28 29 :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults 30 to 'normal'. 31 ''' 32 33 last_touch = ObjectProperty(None) 34 '''Contains the last relevant touch received by the Button. This can 35 be used in `on_press` or `on_release` in order to know which touch 36 dispatched the event. 37 38 .. versionadded:: 1.8.0 39 40 :attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and 41 defaults to `None`. 42 ''' 43 44 MIN_STATE_TIME = 0.035 45 '''The minimum period of time which the widget must remain in the 46 `'down'` state. 47 48 :attr:`MIN_STATE_TIME` is a float and defaults to 0.035. 49 ''' 50 51 always_release = BooleanProperty(True) 52 '''This determines whether or not the widget fires an `on_release` event if 53 the touch_up is outside the widget. 54 55 .. versionadded:: 1.9.0 56 57 :attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and 58 defaults to `True`. 59 ''' 60 61 def __init__(self, **kwargs): 62 self.register_event_type('on_press') 63 self.register_event_type('on_release') 64 super(ButtonBehavior, self).__init__(**kwargs) 65 self.__state_event = None 66 self.__touch_time = None 67 self.fbind('state', self.cancel_event) 68 69 def _do_press(self): 70 self.state = 'down' 71 72 def _do_release(self, *args): 73 self.state = 'normal' 74 75 def cancel_event(self, *args): 76 if self.__state_event: 77 self.__state_event.cancel() 78 self.__state_event = None 79 80 def on_touch_down(self, touch): 81 if super(ButtonBehavior, self).on_touch_down(touch): 82 return True 83 if touch.is_mouse_scrolling: 84 return False 85 if not self.collide_point(touch.x, touch.y): 86 return False 87 if self in touch.ud: 88 return False 89 touch.grab(self) 90 touch.ud[self] = True 91 self.last_touch = touch 92 self.__touch_time = time() 93 self._do_press() 94 self.dispatch('on_press') 95 return True 96 97 def on_touch_move(self, touch): 98 if touch.grab_current is self: 99 return True 100 if super(ButtonBehavior, self).on_touch_move(touch): 101 return True 102 return self in touch.ud 103 104 def on_touch_up(self, touch): 105 if touch.grab_current is not self: 106 return super(ButtonBehavior, self).on_touch_up(touch) 107 assert(self in touch.ud) 108 touch.ungrab(self) 109 self.last_touch = touch 110 111 if (not self.always_release 112 and not self.collide_point(*touch.pos)): 113 self.state = 'normal' 114 return 115 116 touchtime = time() - self.__touch_time 117 if touchtime < self.MIN_STATE_TIME: 118 self.__state_event = Clock.schedule_once( 119 self._do_release, self.MIN_STATE_TIME - touchtime) 120 else: 121 self._do_release() 122 self.dispatch('on_release') 123 return True 124 125 def on_press(self): 126 pass 127 128 def on_release(self): 129 pass 130 131 def trigger_action(self, duration=0.1): 132 '''Trigger whatever action(s) have been bound to the button by calling 133 both the on_press and on_release callbacks. 134 135 This simulates a quick button press without using any touch events. 136 137 Duration is the length of the press in seconds. Pass 0 if you want 138 the action to happen instantly. 139 140 .. versionadded:: 1.8.0 141 ''' 142 self._do_press() 143 self.dispatch('on_press') 144 145 def trigger_release(dt): 146 self._do_release() 147 self.dispatch('on_release') 148 if not duration: 149 trigger_release(0) 150 else: 151 Clock.schedule_once(trigger_release, duration) 152 [end of kivy/uix/behaviors/button.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kivy/uix/behaviors/button.py b/kivy/uix/behaviors/button.py --- a/kivy/uix/behaviors/button.py +++ b/kivy/uix/behaviors/button.py @@ -4,8 +4,10 @@ __all__ = ('ButtonBehavior', ) from kivy.clock import Clock -from kivy.properties import OptionProperty, ObjectProperty, BooleanProperty +from kivy.properties import OptionProperty, ObjectProperty, \ + BooleanProperty, NumericProperty, AliasProperty from time import time +from kivy.logger import Logger class ButtonBehavior(object): @@ -41,13 +43,26 @@ defaults to `None`. ''' - MIN_STATE_TIME = 0.035 + min_state_time = NumericProperty(0.035) '''The minimum period of time which the widget must remain in the `'down'` state. - :attr:`MIN_STATE_TIME` is a float and defaults to 0.035. + :attr:`min_state_time` is a float and defaults to 0.035. ''' + def _set_min_state_time(self, value): + Logger.warning( + 'MIN_STATE_TIME is deprecated, use min_state_time instead') + self.min_state_time = value + + def _get_min_state_time(self): + Logger.warning( + 'MIN_STATE_TIME is deprecated, use min_state_time instead') + return self.min_state_time + + MIN_STATE_TIME = AliasProperty( + _get_min_state_time, _set_min_state_time, bind=('min_state_time', )) + always_release = BooleanProperty(True) '''This determines whether or not the widget fires an `on_release` event if the touch_up is outside the widget.
{"golden_diff": "diff --git a/kivy/uix/behaviors/button.py b/kivy/uix/behaviors/button.py\n--- a/kivy/uix/behaviors/button.py\n+++ b/kivy/uix/behaviors/button.py\n@@ -4,8 +4,10 @@\n __all__ = ('ButtonBehavior', )\n \n from kivy.clock import Clock\n-from kivy.properties import OptionProperty, ObjectProperty, BooleanProperty\n+from kivy.properties import OptionProperty, ObjectProperty, \\\n+ BooleanProperty, NumericProperty, AliasProperty\n from time import time\n+from kivy.logger import Logger\n \n \n class ButtonBehavior(object):\n@@ -41,13 +43,26 @@\n defaults to `None`.\n '''\n \n- MIN_STATE_TIME = 0.035\n+ min_state_time = NumericProperty(0.035)\n '''The minimum period of time which the widget must remain in the\n `'down'` state.\n \n- :attr:`MIN_STATE_TIME` is a float and defaults to 0.035.\n+ :attr:`min_state_time` is a float and defaults to 0.035.\n '''\n \n+ def _set_min_state_time(self, value):\n+ Logger.warning(\n+ 'MIN_STATE_TIME is deprecated, use min_state_time instead')\n+ self.min_state_time = value\n+\n+ def _get_min_state_time(self):\n+ Logger.warning(\n+ 'MIN_STATE_TIME is deprecated, use min_state_time instead')\n+ return self.min_state_time\n+\n+ MIN_STATE_TIME = AliasProperty(\n+ _get_min_state_time, _set_min_state_time, bind=('min_state_time', ))\n+\n always_release = BooleanProperty(True)\n '''This determines whether or not the widget fires an `on_release` event if\n the touch_up is outside the widget.\n", "issue": "Button behavior has a property MIN_STATE_TIME\nThis property can't be used in kv cause of the uppercase.\n\n", "before_files": [{"content": "'''See :class:`ButtonBehavior` for details.\n'''\n\n__all__ = ('ButtonBehavior', )\n\nfrom kivy.clock import Clock\nfrom kivy.properties import OptionProperty, ObjectProperty, BooleanProperty\nfrom time import time\n\n\nclass ButtonBehavior(object):\n '''\n This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides\n :class:`~kivy.uix.button.Button` behavior.\n\n :Events:\n `on_press`\n Fired when the button is pressed.\n `on_release`\n Fired when the button is released (i.e. the touch/click that\n pressed the button goes away).\n '''\n\n state = OptionProperty('normal', options=('normal', 'down'))\n '''The state of the button, must be one of 'normal' or 'down'.\n The state is 'down' only when the button is currently touched/clicked,\n otherwise its 'normal'.\n\n :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults\n to 'normal'.\n '''\n\n last_touch = ObjectProperty(None)\n '''Contains the last relevant touch received by the Button. This can\n be used in `on_press` or `on_release` in order to know which touch\n dispatched the event.\n\n .. versionadded:: 1.8.0\n\n :attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and\n defaults to `None`.\n '''\n\n MIN_STATE_TIME = 0.035\n '''The minimum period of time which the widget must remain in the\n `'down'` state.\n\n :attr:`MIN_STATE_TIME` is a float and defaults to 0.035.\n '''\n\n always_release = BooleanProperty(True)\n '''This determines whether or not the widget fires an `on_release` event if\n the touch_up is outside the widget.\n\n .. versionadded:: 1.9.0\n\n :attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to `True`.\n '''\n\n def __init__(self, **kwargs):\n self.register_event_type('on_press')\n self.register_event_type('on_release')\n super(ButtonBehavior, self).__init__(**kwargs)\n self.__state_event = None\n self.__touch_time = None\n self.fbind('state', self.cancel_event)\n\n def _do_press(self):\n self.state = 'down'\n\n def _do_release(self, *args):\n self.state = 'normal'\n\n def cancel_event(self, *args):\n if self.__state_event:\n self.__state_event.cancel()\n self.__state_event = None\n\n def on_touch_down(self, touch):\n if super(ButtonBehavior, self).on_touch_down(touch):\n return True\n if touch.is_mouse_scrolling:\n return False\n if not self.collide_point(touch.x, touch.y):\n return False\n if self in touch.ud:\n return False\n touch.grab(self)\n touch.ud[self] = True\n self.last_touch = touch\n self.__touch_time = time()\n self._do_press()\n self.dispatch('on_press')\n return True\n\n def on_touch_move(self, touch):\n if touch.grab_current is self:\n return True\n if super(ButtonBehavior, self).on_touch_move(touch):\n return True\n return self in touch.ud\n\n def on_touch_up(self, touch):\n if touch.grab_current is not self:\n return super(ButtonBehavior, self).on_touch_up(touch)\n assert(self in touch.ud)\n touch.ungrab(self)\n self.last_touch = touch\n\n if (not self.always_release\n and not self.collide_point(*touch.pos)):\n self.state = 'normal'\n return\n\n touchtime = time() - self.__touch_time\n if touchtime < self.MIN_STATE_TIME:\n self.__state_event = Clock.schedule_once(\n self._do_release, self.MIN_STATE_TIME - touchtime)\n else:\n self._do_release()\n self.dispatch('on_release')\n return True\n\n def on_press(self):\n pass\n\n def on_release(self):\n pass\n\n def trigger_action(self, duration=0.1):\n '''Trigger whatever action(s) have been bound to the button by calling\n both the on_press and on_release callbacks.\n\n This simulates a quick button press without using any touch events.\n\n Duration is the length of the press in seconds. Pass 0 if you want\n the action to happen instantly.\n\n .. versionadded:: 1.8.0\n '''\n self._do_press()\n self.dispatch('on_press')\n\n def trigger_release(dt):\n self._do_release()\n self.dispatch('on_release')\n if not duration:\n trigger_release(0)\n else:\n Clock.schedule_once(trigger_release, duration)\n", "path": "kivy/uix/behaviors/button.py"}]}
1,983
402
gh_patches_debug_14403
rasdani/github-patches
git_diff
dbt-labs__dbt-core-4359
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ensure that all dbt invocations have an invocation_id This is a sub-issue of #4260. In order to make sure we can distinguish logs from different runs we should always have an invocation_id. If tracking is off, it isn't created today so we would need to create it. (to add it to logging, add it as a class property on the superclass) (previously considered: preserve process id like it is today, global run_id reported at start, or allow some sort of user markers). </issue> <code> [start of core/dbt/lib.py] 1 # TODO: this file is one big TODO 2 import os 3 from dbt.exceptions import RuntimeException 4 from dbt import flags 5 from collections import namedtuple 6 7 RuntimeArgs = namedtuple( 8 'RuntimeArgs', 'project_dir profiles_dir single_threaded profile_name' 9 ) 10 11 12 def get_dbt_config(project_dir, single_threaded=False): 13 from dbt.config.runtime import RuntimeConfig 14 import dbt.adapters.factory 15 16 if os.getenv('DBT_PROFILES_DIR'): 17 profiles_dir = os.getenv('DBT_PROFILES_DIR') 18 else: 19 profiles_dir = os.path.expanduser("~/.dbt") 20 21 # Construct a phony config 22 config = RuntimeConfig.from_args(RuntimeArgs( 23 project_dir, profiles_dir, single_threaded, 'user' 24 )) 25 # Clear previously registered adapters-- 26 # this fixes cacheing behavior on the dbt-server 27 flags.set_from_args('', config) 28 dbt.adapters.factory.reset_adapters() 29 # Load the relevant adapter 30 dbt.adapters.factory.register_adapter(config) 31 32 return config 33 34 35 def get_task_by_type(type): 36 # TODO: we need to tell dbt-server what tasks are available 37 from dbt.task.run import RunTask 38 from dbt.task.list import ListTask 39 from dbt.task.seed import SeedTask 40 from dbt.task.test import TestTask 41 from dbt.task.build import BuildTask 42 from dbt.task.snapshot import SnapshotTask 43 from dbt.task.run_operation import RunOperationTask 44 45 if type == 'run': 46 return RunTask 47 elif type == 'test': 48 return TestTask 49 elif type == 'list': 50 return ListTask 51 elif type == 'seed': 52 return SeedTask 53 elif type == 'build': 54 return BuildTask 55 elif type == 'snapshot': 56 return SnapshotTask 57 elif type == 'run_operation': 58 return RunOperationTask 59 60 raise RuntimeException('not a valid task') 61 62 63 def create_task(type, args, manifest, config): 64 task = get_task_by_type(type) 65 66 def no_op(*args, **kwargs): 67 pass 68 69 # TODO: yuck, let's rethink tasks a little 70 task = task(args, config) 71 72 # Wow! We can monkeypatch taskCls.load_manifest to return _our_ manifest 73 task.load_manifest = no_op 74 task.manifest = manifest 75 return task 76 77 78 def _get_operation_node(manifest, project_path, sql): 79 from dbt.parser.manifest import process_node 80 from dbt.parser.sql import SqlBlockParser 81 import dbt.adapters.factory 82 83 config = get_dbt_config(project_path) 84 block_parser = SqlBlockParser( 85 project=config, 86 manifest=manifest, 87 root_project=config, 88 ) 89 90 adapter = dbt.adapters.factory.get_adapter(config) 91 # TODO : This needs a real name? 92 sql_node = block_parser.parse_remote(sql, 'name') 93 process_node(config, manifest, sql_node) 94 return config, sql_node, adapter 95 96 97 def compile_sql(manifest, project_path, sql): 98 from dbt.task.sql import SqlCompileRunner 99 100 config, node, adapter = _get_operation_node(manifest, project_path, sql) 101 runner = SqlCompileRunner(config, adapter, node, 1, 1) 102 return runner.safe_run(manifest) 103 104 105 def execute_sql(manifest, project_path, sql): 106 from dbt.task.sql import SqlExecuteRunner 107 108 config, node, adapter = _get_operation_node(manifest, project_path, sql) 109 runner = SqlExecuteRunner(config, adapter, node, 1, 1) 110 # TODO: use same interface for runner 111 return runner.safe_run(manifest) 112 113 114 def parse_to_manifest(config): 115 from dbt.parser.manifest import ManifestLoader 116 117 return ManifestLoader.get_full_manifest(config) 118 119 120 def deserialize_manifest(manifest_msgpack): 121 from dbt.contracts.graph.manifest import Manifest 122 123 return Manifest.from_msgpack(manifest_msgpack) 124 125 126 def serialize_manifest(manifest): 127 # TODO: what should this take as an arg? 128 return manifest.to_msgpack() 129 [end of core/dbt/lib.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/dbt/lib.py b/core/dbt/lib.py --- a/core/dbt/lib.py +++ b/core/dbt/lib.py @@ -12,6 +12,7 @@ def get_dbt_config(project_dir, single_threaded=False): from dbt.config.runtime import RuntimeConfig import dbt.adapters.factory + import dbt.events.functions if os.getenv('DBT_PROFILES_DIR'): profiles_dir = os.getenv('DBT_PROFILES_DIR') @@ -28,6 +29,8 @@ dbt.adapters.factory.reset_adapters() # Load the relevant adapter dbt.adapters.factory.register_adapter(config) + # Set invocation id + dbt.events.functions.set_invocation_id() return config
{"golden_diff": "diff --git a/core/dbt/lib.py b/core/dbt/lib.py\n--- a/core/dbt/lib.py\n+++ b/core/dbt/lib.py\n@@ -12,6 +12,7 @@\n def get_dbt_config(project_dir, single_threaded=False):\n from dbt.config.runtime import RuntimeConfig\n import dbt.adapters.factory\n+ import dbt.events.functions\n \n if os.getenv('DBT_PROFILES_DIR'):\n profiles_dir = os.getenv('DBT_PROFILES_DIR')\n@@ -28,6 +29,8 @@\n dbt.adapters.factory.reset_adapters()\n # Load the relevant adapter\n dbt.adapters.factory.register_adapter(config)\n+ # Set invocation id\n+ dbt.events.functions.set_invocation_id()\n \n return config\n", "issue": "Ensure that all dbt invocations have an invocation_id\nThis is a sub-issue of #4260. \n\nIn order to make sure we can distinguish logs from different runs we should always have an invocation_id. If tracking is off, it isn't created today so we would need to create it. (to add it to logging, add it as a class property on the superclass) (previously considered: preserve process id like it is today, global run_id reported at start, or allow some sort of user markers).\n", "before_files": [{"content": "# TODO: this file is one big TODO\nimport os\nfrom dbt.exceptions import RuntimeException\nfrom dbt import flags\nfrom collections import namedtuple\n\nRuntimeArgs = namedtuple(\n 'RuntimeArgs', 'project_dir profiles_dir single_threaded profile_name'\n)\n\n\ndef get_dbt_config(project_dir, single_threaded=False):\n from dbt.config.runtime import RuntimeConfig\n import dbt.adapters.factory\n\n if os.getenv('DBT_PROFILES_DIR'):\n profiles_dir = os.getenv('DBT_PROFILES_DIR')\n else:\n profiles_dir = os.path.expanduser(\"~/.dbt\")\n\n # Construct a phony config\n config = RuntimeConfig.from_args(RuntimeArgs(\n project_dir, profiles_dir, single_threaded, 'user'\n ))\n # Clear previously registered adapters--\n # this fixes cacheing behavior on the dbt-server\n flags.set_from_args('', config)\n dbt.adapters.factory.reset_adapters()\n # Load the relevant adapter\n dbt.adapters.factory.register_adapter(config)\n\n return config\n\n\ndef get_task_by_type(type):\n # TODO: we need to tell dbt-server what tasks are available\n from dbt.task.run import RunTask\n from dbt.task.list import ListTask\n from dbt.task.seed import SeedTask\n from dbt.task.test import TestTask\n from dbt.task.build import BuildTask\n from dbt.task.snapshot import SnapshotTask\n from dbt.task.run_operation import RunOperationTask\n\n if type == 'run':\n return RunTask\n elif type == 'test':\n return TestTask\n elif type == 'list':\n return ListTask\n elif type == 'seed':\n return SeedTask\n elif type == 'build':\n return BuildTask\n elif type == 'snapshot':\n return SnapshotTask\n elif type == 'run_operation':\n return RunOperationTask\n\n raise RuntimeException('not a valid task')\n\n\ndef create_task(type, args, manifest, config):\n task = get_task_by_type(type)\n\n def no_op(*args, **kwargs):\n pass\n\n # TODO: yuck, let's rethink tasks a little\n task = task(args, config)\n\n # Wow! We can monkeypatch taskCls.load_manifest to return _our_ manifest\n task.load_manifest = no_op\n task.manifest = manifest\n return task\n\n\ndef _get_operation_node(manifest, project_path, sql):\n from dbt.parser.manifest import process_node\n from dbt.parser.sql import SqlBlockParser\n import dbt.adapters.factory\n\n config = get_dbt_config(project_path)\n block_parser = SqlBlockParser(\n project=config,\n manifest=manifest,\n root_project=config,\n )\n\n adapter = dbt.adapters.factory.get_adapter(config)\n # TODO : This needs a real name?\n sql_node = block_parser.parse_remote(sql, 'name')\n process_node(config, manifest, sql_node)\n return config, sql_node, adapter\n\n\ndef compile_sql(manifest, project_path, sql):\n from dbt.task.sql import SqlCompileRunner\n\n config, node, adapter = _get_operation_node(manifest, project_path, sql)\n runner = SqlCompileRunner(config, adapter, node, 1, 1)\n return runner.safe_run(manifest)\n\n\ndef execute_sql(manifest, project_path, sql):\n from dbt.task.sql import SqlExecuteRunner\n\n config, node, adapter = _get_operation_node(manifest, project_path, sql)\n runner = SqlExecuteRunner(config, adapter, node, 1, 1)\n # TODO: use same interface for runner\n return runner.safe_run(manifest)\n\n\ndef parse_to_manifest(config):\n from dbt.parser.manifest import ManifestLoader\n\n return ManifestLoader.get_full_manifest(config)\n\n\ndef deserialize_manifest(manifest_msgpack):\n from dbt.contracts.graph.manifest import Manifest\n\n return Manifest.from_msgpack(manifest_msgpack)\n\n\ndef serialize_manifest(manifest):\n # TODO: what should this take as an arg?\n return manifest.to_msgpack()\n", "path": "core/dbt/lib.py"}]}
1,808
167
gh_patches_debug_5450
rasdani/github-patches
git_diff
mozilla__bugbug-1713
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set up Sentry for bugbug Adding it will help investigate issues (like the Mercurial woes). </issue> <code> [start of http_service/bugbug_http/worker.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # This Source Code Form is subject to the terms of the Mozilla Public 4 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 5 # You can obtain one at http://mozilla.org/MPL/2.0/. 6 7 import os 8 import sys 9 10 from redis import Redis 11 from rq import Connection, Worker 12 13 import bugbug_http.boot 14 15 16 def main(): 17 # Bootstrap the worker assets 18 bugbug_http.boot.boot_worker() 19 20 # Provide queue names to listen to as arguments to this script, 21 # similar to rq worker 22 redis_url = os.environ.get("REDIS_URL", "redis://localhost/0") 23 redis_conn = Redis.from_url(redis_url) 24 with Connection(connection=redis_conn): 25 qs = sys.argv[1:] or ["default"] 26 27 w = Worker(qs) 28 w.work() 29 30 31 if __name__ == "__main__": 32 main() 33 [end of http_service/bugbug_http/worker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py --- a/http_service/bugbug_http/worker.py +++ b/http_service/bugbug_http/worker.py @@ -7,10 +7,20 @@ import os import sys +import sentry_sdk from redis import Redis from rq import Connection, Worker +from sentry_sdk.integrations.rq import RqIntegration import bugbug_http.boot +from bugbug import get_bugbug_version + +if os.environ.get("SENTRY_DSN"): + sentry_sdk.init( + os.environ.get("SENTRY_DSN"), + integrations=[RqIntegration()], + release=get_bugbug_version(), + ) def main():
{"golden_diff": "diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py\n--- a/http_service/bugbug_http/worker.py\n+++ b/http_service/bugbug_http/worker.py\n@@ -7,10 +7,20 @@\n import os\n import sys\n \n+import sentry_sdk\n from redis import Redis\n from rq import Connection, Worker\n+from sentry_sdk.integrations.rq import RqIntegration\n \n import bugbug_http.boot\n+from bugbug import get_bugbug_version\n+\n+if os.environ.get(\"SENTRY_DSN\"):\n+ sentry_sdk.init(\n+ os.environ.get(\"SENTRY_DSN\"),\n+ integrations=[RqIntegration()],\n+ release=get_bugbug_version(),\n+ )\n \n \n def main():\n", "issue": "Set up Sentry for bugbug\nAdding it will help investigate issues (like the Mercurial woes).\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport sys\n\nfrom redis import Redis\nfrom rq import Connection, Worker\n\nimport bugbug_http.boot\n\n\ndef main():\n # Bootstrap the worker assets\n bugbug_http.boot.boot_worker()\n\n # Provide queue names to listen to as arguments to this script,\n # similar to rq worker\n redis_url = os.environ.get(\"REDIS_URL\", \"redis://localhost/0\")\n redis_conn = Redis.from_url(redis_url)\n with Connection(connection=redis_conn):\n qs = sys.argv[1:] or [\"default\"]\n\n w = Worker(qs)\n w.work()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "http_service/bugbug_http/worker.py"}]}
833
170
gh_patches_debug_58376
rasdani/github-patches
git_diff
deis__deis-323
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `deis run` generates ugly error if app code not yet pushed I ran `deis run ls -la` after I had created the app, but before I had pushed the code with `git push deis master`. Here is the error I received: ``` ben$ example-python-flask > deis run ls -la Warning: non-zero return code 255 lxc-start: No such file or directory - failed to mount '/opt/deis/runtime/slugs/hushed-sailfish-1/app' on '/usr/lib/lxc/root///app' lxc-start: failed to setup the mount entries for '5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479' lxc-start: failed to setup the container lxc-start: invalid sequence number 1. expected 2 lxc-start: failed to spawn '5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479' lxc-start: Device or resource busy - failed to remove cgroup '/sys/fs/cgroup/cpuset//lxc/5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479' ``` </issue> <code> [start of api/tasks.py] 1 2 from __future__ import unicode_literals 3 import importlib 4 5 from celery import task 6 from celery.canvas import group 7 8 from deis import settings 9 from provider import import_provider_module 10 11 # import user-defined config management module 12 CM = importlib.import_module(settings.CM_MODULE) 13 14 15 @task 16 def build_layer(layer): 17 provider = import_provider_module(layer.flavor.provider.type) 18 provider.build_layer(layer.flat()) 19 20 21 @task 22 def destroy_layer(layer): 23 provider = import_provider_module(layer.flavor.provider.type) 24 provider.destroy_layer(layer.flat()) 25 layer.delete() 26 27 28 @task 29 def build_node(node): 30 provider = import_provider_module(node.layer.flavor.provider.type) 31 provider_id, fqdn, metadata = provider.build_node(node.flat()) 32 node.provider_id = provider_id 33 node.fqdn = fqdn 34 node.metadata = metadata 35 node.save() 36 CM.bootstrap_node(node.flat()) 37 38 39 @task 40 def destroy_node(node): 41 provider = import_provider_module(node.layer.flavor.provider.type) 42 provider.destroy_node(node.flat()) 43 CM.purge_node(node.flat()) 44 node.delete() 45 46 47 @task 48 def converge_node(node): 49 output, rc = CM.converge_node(node.flat()) 50 return output, rc 51 52 53 @task 54 def run_node(node, command): 55 output, rc = CM.run_node(node.flat(), command) 56 return output, rc 57 58 59 @task 60 def build_formation(formation): 61 return 62 63 64 @task 65 def destroy_formation(formation): 66 app_tasks = [destroy_app.si(a) for a in formation.app_set.all()] 67 node_tasks = [destroy_node.si(n) for n in formation.node_set.all()] 68 layer_tasks = [destroy_layer.si(l) for l in formation.layer_set.all()] 69 group(app_tasks + node_tasks).apply_async().join() 70 group(layer_tasks).apply_async().join() 71 CM.purge_formation(formation.flat()) 72 formation.delete() 73 74 75 @task 76 def converge_formation(formation): 77 nodes = formation.node_set.all() 78 subtasks = [] 79 for n in nodes: 80 subtask = converge_node.si(n) 81 subtasks.append(subtask) 82 group(*subtasks).apply_async().join() 83 84 85 @task 86 def build_app(app): 87 return 88 89 90 @task 91 def destroy_app(app): 92 CM.purge_app(app.flat()) 93 app.delete() 94 app.formation.publish() 95 96 97 @task 98 def converge_controller(): 99 CM.converge_controller() 100 return None 101 [end of api/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/api/tasks.py b/api/tasks.py --- a/api/tasks.py +++ b/api/tasks.py @@ -53,6 +53,8 @@ @task def run_node(node, command): output, rc = CM.run_node(node.flat(), command) + if rc != 0 and 'failed to setup the container' in output: + output = '\033[35mPlease run `git push deis master` first.\033[0m\n' + output return output, rc
{"golden_diff": "diff --git a/api/tasks.py b/api/tasks.py\n--- a/api/tasks.py\n+++ b/api/tasks.py\n@@ -53,6 +53,8 @@\n @task\n def run_node(node, command):\n output, rc = CM.run_node(node.flat(), command)\n+ if rc != 0 and 'failed to setup the container' in output:\n+ output = '\\033[35mPlease run `git push deis master` first.\\033[0m\\n' + output\n return output, rc\n", "issue": "`deis run` generates ugly error if app code not yet pushed\nI ran `deis run ls -la` after I had created the app, but before I had pushed the code with `git push deis master`. Here is the error I received:\n\n```\nben$ example-python-flask > deis run ls -la\nWarning: non-zero return code 255\nlxc-start: No such file or directory - failed to mount '/opt/deis/runtime/slugs/hushed-sailfish-1/app' on '/usr/lib/lxc/root///app'\nlxc-start: failed to setup the mount entries for '5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'\nlxc-start: failed to setup the container\nlxc-start: invalid sequence number 1. expected 2\nlxc-start: failed to spawn '5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'\nlxc-start: Device or resource busy - failed to remove cgroup '/sys/fs/cgroup/cpuset//lxc/5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'\n```\n\n", "before_files": [{"content": "\nfrom __future__ import unicode_literals\nimport importlib\n\nfrom celery import task\nfrom celery.canvas import group\n\nfrom deis import settings\nfrom provider import import_provider_module\n\n# import user-defined config management module\nCM = importlib.import_module(settings.CM_MODULE)\n\n\n@task\ndef build_layer(layer):\n provider = import_provider_module(layer.flavor.provider.type)\n provider.build_layer(layer.flat())\n\n\n@task\ndef destroy_layer(layer):\n provider = import_provider_module(layer.flavor.provider.type)\n provider.destroy_layer(layer.flat())\n layer.delete()\n\n\n@task\ndef build_node(node):\n provider = import_provider_module(node.layer.flavor.provider.type)\n provider_id, fqdn, metadata = provider.build_node(node.flat())\n node.provider_id = provider_id\n node.fqdn = fqdn\n node.metadata = metadata\n node.save()\n CM.bootstrap_node(node.flat())\n\n\n@task\ndef destroy_node(node):\n provider = import_provider_module(node.layer.flavor.provider.type)\n provider.destroy_node(node.flat())\n CM.purge_node(node.flat())\n node.delete()\n\n\n@task\ndef converge_node(node):\n output, rc = CM.converge_node(node.flat())\n return output, rc\n\n\n@task\ndef run_node(node, command):\n output, rc = CM.run_node(node.flat(), command)\n return output, rc\n\n\n@task\ndef build_formation(formation):\n return\n\n\n@task\ndef destroy_formation(formation):\n app_tasks = [destroy_app.si(a) for a in formation.app_set.all()]\n node_tasks = [destroy_node.si(n) for n in formation.node_set.all()]\n layer_tasks = [destroy_layer.si(l) for l in formation.layer_set.all()]\n group(app_tasks + node_tasks).apply_async().join()\n group(layer_tasks).apply_async().join()\n CM.purge_formation(formation.flat())\n formation.delete()\n\n\n@task\ndef converge_formation(formation):\n nodes = formation.node_set.all()\n subtasks = []\n for n in nodes:\n subtask = converge_node.si(n)\n subtasks.append(subtask)\n group(*subtasks).apply_async().join()\n\n\n@task\ndef build_app(app):\n return\n\n\n@task\ndef destroy_app(app):\n CM.purge_app(app.flat())\n app.delete()\n app.formation.publish()\n\n\n@task\ndef converge_controller():\n CM.converge_controller()\n return None\n", "path": "api/tasks.py"}]}
1,631
118
gh_patches_debug_15911
rasdani/github-patches
git_diff
pallets__click-2599
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> deprecate `__version__` attribute The `__version__` attribute is an old pattern from early in Python packaging. Setuptools eventually made it easier to use the pattern by allowing reading the value from the attribute at build time, and some other build backends have done the same. However, there's no reason to expose this directly in code anymore. It's usually easier to use feature detection (`hasattr`, `try/except`) instead. `importlib.metadata.version("click")` can be used to get the version at runtime in a standard way, if it's really needed. </issue> <code> [start of src/click/__init__.py] 1 """ 2 Click is a simple Python module inspired by the stdlib optparse to make 3 writing command line scripts fun. Unlike other modules, it's based 4 around a simple API that does not come with too much magic and is 5 composable. 6 """ 7 from __future__ import annotations 8 9 from .core import Argument as Argument 10 from .core import Command as Command 11 from .core import CommandCollection as CommandCollection 12 from .core import Context as Context 13 from .core import Group as Group 14 from .core import Option as Option 15 from .core import Parameter as Parameter 16 from .decorators import argument as argument 17 from .decorators import command as command 18 from .decorators import confirmation_option as confirmation_option 19 from .decorators import group as group 20 from .decorators import help_option as help_option 21 from .decorators import make_pass_decorator as make_pass_decorator 22 from .decorators import option as option 23 from .decorators import pass_context as pass_context 24 from .decorators import pass_obj as pass_obj 25 from .decorators import password_option as password_option 26 from .decorators import version_option as version_option 27 from .exceptions import Abort as Abort 28 from .exceptions import BadArgumentUsage as BadArgumentUsage 29 from .exceptions import BadOptionUsage as BadOptionUsage 30 from .exceptions import BadParameter as BadParameter 31 from .exceptions import ClickException as ClickException 32 from .exceptions import FileError as FileError 33 from .exceptions import MissingParameter as MissingParameter 34 from .exceptions import NoSuchOption as NoSuchOption 35 from .exceptions import UsageError as UsageError 36 from .formatting import HelpFormatter as HelpFormatter 37 from .formatting import wrap_text as wrap_text 38 from .globals import get_current_context as get_current_context 39 from .termui import clear as clear 40 from .termui import confirm as confirm 41 from .termui import echo_via_pager as echo_via_pager 42 from .termui import edit as edit 43 from .termui import getchar as getchar 44 from .termui import launch as launch 45 from .termui import pause as pause 46 from .termui import progressbar as progressbar 47 from .termui import prompt as prompt 48 from .termui import secho as secho 49 from .termui import style as style 50 from .termui import unstyle as unstyle 51 from .types import BOOL as BOOL 52 from .types import Choice as Choice 53 from .types import DateTime as DateTime 54 from .types import File as File 55 from .types import FLOAT as FLOAT 56 from .types import FloatRange as FloatRange 57 from .types import INT as INT 58 from .types import IntRange as IntRange 59 from .types import ParamType as ParamType 60 from .types import Path as Path 61 from .types import STRING as STRING 62 from .types import Tuple as Tuple 63 from .types import UNPROCESSED as UNPROCESSED 64 from .types import UUID as UUID 65 from .utils import echo as echo 66 from .utils import format_filename as format_filename 67 from .utils import get_app_dir as get_app_dir 68 from .utils import get_binary_stream as get_binary_stream 69 from .utils import get_text_stream as get_text_stream 70 from .utils import open_file as open_file 71 72 __version__ = "8.2.0.dev0" 73 74 75 def __getattr__(name: str) -> object: 76 import warnings 77 78 if name == "BaseCommand": 79 from .core import _BaseCommand 80 81 warnings.warn( 82 "'BaseCommand' is deprecated and will be removed in Click 9.0. Use" 83 " 'Command' instead.", 84 DeprecationWarning, 85 stacklevel=2, 86 ) 87 return _BaseCommand 88 89 if name == "MultiCommand": 90 from .core import _MultiCommand 91 92 warnings.warn( 93 "'MultiCommand' is deprecated and will be removed in Click 9.0. Use" 94 " 'Group' instead.", 95 DeprecationWarning, 96 stacklevel=2, 97 ) 98 return _MultiCommand 99 100 if name == "OptionParser": 101 from .parser import _OptionParser 102 103 warnings.warn( 104 "'OptionParser' is deprecated and will be removed in Click 9.0. The" 105 " old parser is available in 'optparse'.", 106 DeprecationWarning, 107 stacklevel=2, 108 ) 109 return _OptionParser 110 111 raise AttributeError(name) 112 [end of src/click/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/click/__init__.py b/src/click/__init__.py --- a/src/click/__init__.py +++ b/src/click/__init__.py @@ -69,8 +69,6 @@ from .utils import get_text_stream as get_text_stream from .utils import open_file as open_file -__version__ = "8.2.0.dev0" - def __getattr__(name: str) -> object: import warnings @@ -108,4 +106,17 @@ ) return _OptionParser + if name == "__version__": + import importlib.metadata + import warnings + + warnings.warn( + "The '__version__' attribute is deprecated and will be removed in" + " Click 9.1. Use feature detection or" + " 'importlib.metadata.version(\"click\")' instead.", + DeprecationWarning, + stacklevel=2, + ) + return importlib.metadata.version("click") + raise AttributeError(name)
{"golden_diff": "diff --git a/src/click/__init__.py b/src/click/__init__.py\n--- a/src/click/__init__.py\n+++ b/src/click/__init__.py\n@@ -69,8 +69,6 @@\n from .utils import get_text_stream as get_text_stream\n from .utils import open_file as open_file\n \n-__version__ = \"8.2.0.dev0\"\n-\n \n def __getattr__(name: str) -> object:\n import warnings\n@@ -108,4 +106,17 @@\n )\n return _OptionParser\n \n+ if name == \"__version__\":\n+ import importlib.metadata\n+ import warnings\n+\n+ warnings.warn(\n+ \"The '__version__' attribute is deprecated and will be removed in\"\n+ \" Click 9.1. Use feature detection or\"\n+ \" 'importlib.metadata.version(\\\"click\\\")' instead.\",\n+ DeprecationWarning,\n+ stacklevel=2,\n+ )\n+ return importlib.metadata.version(\"click\")\n+\n raise AttributeError(name)\n", "issue": "deprecate `__version__` attribute\nThe `__version__` attribute is an old pattern from early in Python packaging. Setuptools eventually made it easier to use the pattern by allowing reading the value from the attribute at build time, and some other build backends have done the same.\r\n\r\nHowever, there's no reason to expose this directly in code anymore. It's usually easier to use feature detection (`hasattr`, `try/except`) instead. `importlib.metadata.version(\"click\")` can be used to get the version at runtime in a standard way, if it's really needed.\n", "before_files": [{"content": "\"\"\"\nClick is a simple Python module inspired by the stdlib optparse to make\nwriting command line scripts fun. Unlike other modules, it's based\naround a simple API that does not come with too much magic and is\ncomposable.\n\"\"\"\nfrom __future__ import annotations\n\nfrom .core import Argument as Argument\nfrom .core import Command as Command\nfrom .core import CommandCollection as CommandCollection\nfrom .core import Context as Context\nfrom .core import Group as Group\nfrom .core import Option as Option\nfrom .core import Parameter as Parameter\nfrom .decorators import argument as argument\nfrom .decorators import command as command\nfrom .decorators import confirmation_option as confirmation_option\nfrom .decorators import group as group\nfrom .decorators import help_option as help_option\nfrom .decorators import make_pass_decorator as make_pass_decorator\nfrom .decorators import option as option\nfrom .decorators import pass_context as pass_context\nfrom .decorators import pass_obj as pass_obj\nfrom .decorators import password_option as password_option\nfrom .decorators import version_option as version_option\nfrom .exceptions import Abort as Abort\nfrom .exceptions import BadArgumentUsage as BadArgumentUsage\nfrom .exceptions import BadOptionUsage as BadOptionUsage\nfrom .exceptions import BadParameter as BadParameter\nfrom .exceptions import ClickException as ClickException\nfrom .exceptions import FileError as FileError\nfrom .exceptions import MissingParameter as MissingParameter\nfrom .exceptions import NoSuchOption as NoSuchOption\nfrom .exceptions import UsageError as UsageError\nfrom .formatting import HelpFormatter as HelpFormatter\nfrom .formatting import wrap_text as wrap_text\nfrom .globals import get_current_context as get_current_context\nfrom .termui import clear as clear\nfrom .termui import confirm as confirm\nfrom .termui import echo_via_pager as echo_via_pager\nfrom .termui import edit as edit\nfrom .termui import getchar as getchar\nfrom .termui import launch as launch\nfrom .termui import pause as pause\nfrom .termui import progressbar as progressbar\nfrom .termui import prompt as prompt\nfrom .termui import secho as secho\nfrom .termui import style as style\nfrom .termui import unstyle as unstyle\nfrom .types import BOOL as BOOL\nfrom .types import Choice as Choice\nfrom .types import DateTime as DateTime\nfrom .types import File as File\nfrom .types import FLOAT as FLOAT\nfrom .types import FloatRange as FloatRange\nfrom .types import INT as INT\nfrom .types import IntRange as IntRange\nfrom .types import ParamType as ParamType\nfrom .types import Path as Path\nfrom .types import STRING as STRING\nfrom .types import Tuple as Tuple\nfrom .types import UNPROCESSED as UNPROCESSED\nfrom .types import UUID as UUID\nfrom .utils import echo as echo\nfrom .utils import format_filename as format_filename\nfrom .utils import get_app_dir as get_app_dir\nfrom .utils import get_binary_stream as get_binary_stream\nfrom .utils import get_text_stream as get_text_stream\nfrom .utils import open_file as open_file\n\n__version__ = \"8.2.0.dev0\"\n\n\ndef __getattr__(name: str) -> object:\n import warnings\n\n if name == \"BaseCommand\":\n from .core import _BaseCommand\n\n warnings.warn(\n \"'BaseCommand' is deprecated and will be removed in Click 9.0. Use\"\n \" 'Command' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return _BaseCommand\n\n if name == \"MultiCommand\":\n from .core import _MultiCommand\n\n warnings.warn(\n \"'MultiCommand' is deprecated and will be removed in Click 9.0. Use\"\n \" 'Group' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return _MultiCommand\n\n if name == \"OptionParser\":\n from .parser import _OptionParser\n\n warnings.warn(\n \"'OptionParser' is deprecated and will be removed in Click 9.0. The\"\n \" old parser is available in 'optparse'.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return _OptionParser\n\n raise AttributeError(name)\n", "path": "src/click/__init__.py"}]}
1,797
233
gh_patches_debug_1752
rasdani/github-patches
git_diff
spack__spack-4809
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> problem with xproto the xproto could be compile properly but while installing, i come across the following problem ``` ==> 'make' '-j8' /Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive Making all in specs Making all in SIAddresses make[3]: Nothing to be done for `all'. make[3]: Nothing to be done for `all-am'. make[2]: Nothing to be done for `all-am'. ==> 'make' '-j8' 'install' Making install in specs Making install in SIAddresses make[3]: Nothing to be done for `install-exec-am'. make[3]: Nothing to be done for `install-data-am'. make[3]: Nothing to be done for `install-exec-am'. /spack/var/spack/stage/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/xproto-7.0.31/install-sh -c -d '/spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto' /spack/var/spack/stage/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/xproto-7.0.31/install-sh -c -d '/spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto' mkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share: File exists mkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc: File exists mkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto: File exists make[3]: *** [install-dist_shelfDATA] Error 1 make[3]: *** Waiting for unfinished jobs.... make[2]: *** [install-am] Error 2 make[1]: *** [install-recursive] Error 1 make: *** [install-recursive] Error 1 ``` </issue> <code> [start of var/spack/repos/builtin/packages/xproto/package.py] 1 ############################################################################## 2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, [email protected], All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/llnl/spack 10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 from spack import * 26 27 28 class Xproto(AutotoolsPackage): 29 """X Window System Core Protocol. 30 31 This package provides the headers and specification documents defining 32 the X Window System Core Protocol, Version 11. 33 34 It also includes a number of headers that aren't purely protocol related, 35 but are depended upon by many other X Window System packages to provide 36 common definitions and porting layer.""" 37 38 homepage = "http://cgit.freedesktop.org/xorg/proto/x11proto" 39 url = "https://www.x.org/archive/individual/proto/xproto-7.0.31.tar.gz" 40 41 version('7.0.31', '04b925bf9e472c80f9212615cd684f1e') 42 version('7.0.29', '16a78dd2c5ad73011105c96235f6a0af') 43 44 depends_on('[email protected]:', type='build') 45 depends_on('util-macros', type='build') 46 [end of var/spack/repos/builtin/packages/xproto/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/xproto/package.py b/var/spack/repos/builtin/packages/xproto/package.py --- a/var/spack/repos/builtin/packages/xproto/package.py +++ b/var/spack/repos/builtin/packages/xproto/package.py @@ -43,3 +43,8 @@ depends_on('[email protected]:', type='build') depends_on('util-macros', type='build') + + def install(self, spec, prefix): + # Installation fails in parallel + # See https://github.com/LLNL/spack/issues/4805 + make('install', parallel=False)
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/xproto/package.py b/var/spack/repos/builtin/packages/xproto/package.py\n--- a/var/spack/repos/builtin/packages/xproto/package.py\n+++ b/var/spack/repos/builtin/packages/xproto/package.py\n@@ -43,3 +43,8 @@\n \n depends_on('[email protected]:', type='build')\n depends_on('util-macros', type='build')\n+\n+ def install(self, spec, prefix):\n+ # Installation fails in parallel\n+ # See https://github.com/LLNL/spack/issues/4805\n+ make('install', parallel=False)\n", "issue": "problem with xproto\nthe xproto could be compile properly but while installing, i come across the following problem\r\n\r\n```\r\n==> 'make' '-j8'\r\n/Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive\r\nMaking all in specs\r\nMaking all in SIAddresses\r\nmake[3]: Nothing to be done for `all'.\r\nmake[3]: Nothing to be done for `all-am'.\r\nmake[2]: Nothing to be done for `all-am'.\r\n==> 'make' '-j8' 'install'\r\nMaking install in specs\r\nMaking install in SIAddresses\r\nmake[3]: Nothing to be done for `install-exec-am'.\r\nmake[3]: Nothing to be done for `install-data-am'.\r\nmake[3]: Nothing to be done for `install-exec-am'.\r\n /spack/var/spack/stage/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/xproto-7.0.31/install-sh -c -d '/spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto'\r\n /spack/var/spack/stage/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/xproto-7.0.31/install-sh -c -d '/spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto'\r\nmkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share: File exists\r\nmkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc: File exists\r\nmkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto: File exists\r\nmake[3]: *** [install-dist_shelfDATA] Error 1\r\nmake[3]: *** Waiting for unfinished jobs....\r\nmake[2]: *** [install-am] Error 2\r\nmake[1]: *** [install-recursive] Error 1\r\nmake: *** [install-recursive] Error 1\r\n```\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Xproto(AutotoolsPackage):\n \"\"\"X Window System Core Protocol.\n\n This package provides the headers and specification documents defining\n the X Window System Core Protocol, Version 11.\n\n It also includes a number of headers that aren't purely protocol related,\n but are depended upon by many other X Window System packages to provide\n common definitions and porting layer.\"\"\"\n\n homepage = \"http://cgit.freedesktop.org/xorg/proto/x11proto\"\n url = \"https://www.x.org/archive/individual/proto/xproto-7.0.31.tar.gz\"\n\n version('7.0.31', '04b925bf9e472c80f9212615cd684f1e')\n version('7.0.29', '16a78dd2c5ad73011105c96235f6a0af')\n\n depends_on('[email protected]:', type='build')\n depends_on('util-macros', type='build')\n", "path": "var/spack/repos/builtin/packages/xproto/package.py"}]}
1,805
148
gh_patches_debug_932
rasdani/github-patches
git_diff
praw-dev__praw-1304
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sphinx stops emitting warnings if it encounters only one **Describe the bug** <!-- A clear and concise description of what the bug is. --> When running pre_push, if Sphinx runs into an warning, it does does print any more. When there are lots of warnings, it takes a lot of time to re-run pre_push per warning I recommend adding the command line argument `--keep-going`. This will cause it to print all warnings. **System Info** - PRAW Version: Latest </issue> <code> [start of pre_push.py] 1 #!/usr/bin/env python3 2 """Run static analysis on the project.""" 3 4 import argparse 5 import sys 6 from os import path 7 from shutil import rmtree 8 from subprocess import CalledProcessError, check_call 9 from tempfile import mkdtemp 10 11 current_directory = path.abspath(path.join(__file__, "..")) 12 13 14 def do_process(args, shell=False): 15 """Run program provided by args. 16 17 Return True on success. 18 19 Output failed message on non-zero exit and return False. 20 21 Exit if command is not found. 22 """ 23 print("Running: {}".format(" ".join(args))) 24 try: 25 check_call(args, shell=shell) 26 except CalledProcessError: 27 print("\nFailed: {}".format(" ".join(args))) 28 return False 29 except Exception as exc: 30 sys.stderr.write(str(exc) + "\n") 31 sys.exit(1) 32 return True 33 34 35 def run_static(): 36 """Runs the static tests. 37 38 Returns a statuscode of 0 if everything ran correctly. 39 Otherwise, it will return statuscode 1 40 """ 41 success = True 42 success &= do_process( 43 [ 44 sys.executable, 45 path.join(current_directory, "tools", "static_word_checks.py"), 46 "--replace", 47 ] 48 ) 49 success &= do_process(["black ."], shell=True) 50 success &= do_process(["flake8", "--exclude=.eggs,build,docs"]) 51 success &= do_process(["pydocstyle", "praw"]) 52 # success &= do_process(["pylint", "--rcfile=.pylintrc", "praw"]) 53 54 tmp_dir = mkdtemp() 55 try: 56 success &= do_process(["sphinx-build", "-W", "docs", tmp_dir]) 57 finally: 58 rmtree(tmp_dir) 59 60 return success 61 62 63 def run_unit(): 64 """Runs the unit-tests. 65 66 Follows the behavior of the static tests, 67 where any failed tests cause pre_push.py to fail. 68 """ 69 return do_process( 70 [sys.executable, path.join(current_directory, "setup.py"), "test"] 71 ) 72 73 74 def main(): 75 """Runs the main function. 76 77 usage: pre_push.py [-h] [-n] [-u] [-a] 78 79 Run static and/or unit-tests 80 """ 81 parser = argparse.ArgumentParser( 82 description="Run static and/or unit-tests" 83 ) 84 parser.add_argument( 85 "-n", 86 "--unstatic", 87 action="store_true", 88 help="Do not run static tests (black/flake8/pydocstyle/sphinx-build)", 89 default=False, 90 ) 91 parser.add_argument( 92 "-u", 93 "--unit-tests", 94 "--unit", 95 action="store_true", 96 default=False, 97 help="Run the unit tests", 98 ) 99 parser.add_argument( 100 "-a", 101 "--all", 102 action="store_true", 103 default=False, 104 help="Run all of the tests (static and unit). " 105 "Overrides the unstatic argument.", 106 ) 107 args = parser.parse_args() 108 success = True 109 try: 110 if not args.unstatic or args.all: 111 success &= run_static() 112 if args.all or args.unit_tests: 113 success &= run_unit() 114 except KeyboardInterrupt: 115 return int(not False) 116 return int(not success) 117 118 119 if __name__ == "__main__": 120 exit_code = main() 121 print( 122 "\npre_push.py: Success!" if not exit_code else "\npre_push.py: Fail" 123 ) 124 sys.exit(exit_code) 125 [end of pre_push.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_push.py b/pre_push.py --- a/pre_push.py +++ b/pre_push.py @@ -53,7 +53,9 @@ tmp_dir = mkdtemp() try: - success &= do_process(["sphinx-build", "-W", "docs", tmp_dir]) + success &= do_process( + ["sphinx-build", "-W", "--keep-going", "docs", tmp_dir] + ) finally: rmtree(tmp_dir)
{"golden_diff": "diff --git a/pre_push.py b/pre_push.py\n--- a/pre_push.py\n+++ b/pre_push.py\n@@ -53,7 +53,9 @@\n \n tmp_dir = mkdtemp()\n try:\n- success &= do_process([\"sphinx-build\", \"-W\", \"docs\", tmp_dir])\n+ success &= do_process(\n+ [\"sphinx-build\", \"-W\", \"--keep-going\", \"docs\", tmp_dir]\n+ )\n finally:\n rmtree(tmp_dir)\n", "issue": "Sphinx stops emitting warnings if it encounters only one\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. --> When running pre_push, if Sphinx runs into an warning, it does does print any more. When there are lots of warnings, it takes a lot of time to re-run pre_push per warning\r\n\r\nI recommend adding the command line argument `--keep-going`. This will cause it to print all warnings.\r\n\r\n**System Info**\r\n - PRAW Version: Latest\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Run static analysis on the project.\"\"\"\n\nimport argparse\nimport sys\nfrom os import path\nfrom shutil import rmtree\nfrom subprocess import CalledProcessError, check_call\nfrom tempfile import mkdtemp\n\ncurrent_directory = path.abspath(path.join(__file__, \"..\"))\n\n\ndef do_process(args, shell=False):\n \"\"\"Run program provided by args.\n\n Return True on success.\n\n Output failed message on non-zero exit and return False.\n\n Exit if command is not found.\n \"\"\"\n print(\"Running: {}\".format(\" \".join(args)))\n try:\n check_call(args, shell=shell)\n except CalledProcessError:\n print(\"\\nFailed: {}\".format(\" \".join(args)))\n return False\n except Exception as exc:\n sys.stderr.write(str(exc) + \"\\n\")\n sys.exit(1)\n return True\n\n\ndef run_static():\n \"\"\"Runs the static tests.\n\n Returns a statuscode of 0 if everything ran correctly.\n Otherwise, it will return statuscode 1\n \"\"\"\n success = True\n success &= do_process(\n [\n sys.executable,\n path.join(current_directory, \"tools\", \"static_word_checks.py\"),\n \"--replace\",\n ]\n )\n success &= do_process([\"black .\"], shell=True)\n success &= do_process([\"flake8\", \"--exclude=.eggs,build,docs\"])\n success &= do_process([\"pydocstyle\", \"praw\"])\n # success &= do_process([\"pylint\", \"--rcfile=.pylintrc\", \"praw\"])\n\n tmp_dir = mkdtemp()\n try:\n success &= do_process([\"sphinx-build\", \"-W\", \"docs\", tmp_dir])\n finally:\n rmtree(tmp_dir)\n\n return success\n\n\ndef run_unit():\n \"\"\"Runs the unit-tests.\n\n Follows the behavior of the static tests,\n where any failed tests cause pre_push.py to fail.\n \"\"\"\n return do_process(\n [sys.executable, path.join(current_directory, \"setup.py\"), \"test\"]\n )\n\n\ndef main():\n \"\"\"Runs the main function.\n\n usage: pre_push.py [-h] [-n] [-u] [-a]\n\n Run static and/or unit-tests\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Run static and/or unit-tests\"\n )\n parser.add_argument(\n \"-n\",\n \"--unstatic\",\n action=\"store_true\",\n help=\"Do not run static tests (black/flake8/pydocstyle/sphinx-build)\",\n default=False,\n )\n parser.add_argument(\n \"-u\",\n \"--unit-tests\",\n \"--unit\",\n action=\"store_true\",\n default=False,\n help=\"Run the unit tests\",\n )\n parser.add_argument(\n \"-a\",\n \"--all\",\n action=\"store_true\",\n default=False,\n help=\"Run all of the tests (static and unit). \"\n \"Overrides the unstatic argument.\",\n )\n args = parser.parse_args()\n success = True\n try:\n if not args.unstatic or args.all:\n success &= run_static()\n if args.all or args.unit_tests:\n success &= run_unit()\n except KeyboardInterrupt:\n return int(not False)\n return int(not success)\n\n\nif __name__ == \"__main__\":\n exit_code = main()\n print(\n \"\\npre_push.py: Success!\" if not exit_code else \"\\npre_push.py: Fail\"\n )\n sys.exit(exit_code)\n", "path": "pre_push.py"}]}
1,647
108
gh_patches_debug_314
rasdani/github-patches
git_diff
zulip__zulip-20788
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "Pan and zoom" cuts off images instead of using the available space If you have a tall image and a wide monitor (and wide browser viewport), and you try to zoom… the image stays trapped inside the same box it occupied before you even tried to zoom. If the image is super wide instead of tall, the same thing happens the other way around. This leads to a lot of frustrating panning around, to look at the different parts of the image through this narrow keyhole, while tons of screen space next to it doesn't get used. This is the biggest of the issues described by @vanclute in #18939. It was reported again by @alexanderglueck as #19837, and I just ran into it myself ([chat](https://chat.zulip.org/#narrow/stream/6-frontend/topic/pan.2Fzoom/near/1308717)). Here's a nice illustration from #19837: ![image](https://user-images.githubusercontent.com/28173/149076010-0776088a-53ef-4e10-97b0-6d621692b9e2.png) Instead, when zooming we should use the full space available. This may be bigger than the area the image occupied when it was scaled down to fit completely in the space available, because the available box may have a different aspect ratio from the image. </issue> <code> [start of version.py] 1 import os 2 3 ZULIP_VERSION = "5.0-dev+git" 4 5 # Add information on number of commits and commit hash to version, if available 6 zulip_git_version_file = os.path.join( 7 os.path.dirname(os.path.abspath(__file__)), "zulip-git-version" 8 ) 9 lines = [ZULIP_VERSION, ""] 10 if os.path.exists(zulip_git_version_file): 11 with open(zulip_git_version_file) as f: 12 lines = f.readlines() + ["", ""] 13 ZULIP_VERSION = lines.pop(0).strip() 14 ZULIP_MERGE_BASE = lines.pop(0).strip() 15 16 LATEST_MAJOR_VERSION = "4.0" 17 LATEST_RELEASE_VERSION = "4.9" 18 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2021/05/13/zulip-4-0-released/" 19 20 # Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be 21 # prevented from connecting to the Zulip server. Versions above 22 # DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have 23 # a banner at the top of the page asking the user to upgrade. 24 DESKTOP_MINIMUM_VERSION = "5.2.0" 25 DESKTOP_WARNING_VERSION = "5.4.3" 26 27 # Bump the API_FEATURE_LEVEL whenever an API change is made 28 # that clients might want to condition on. If we forget at 29 # the time we make the change, then bump it later as soon 30 # as we notice; clients using API_FEATURE_LEVEL will just not 31 # use the new feature/API until the bump. 32 # 33 # Changes should be accompanied by documentation explaining what the 34 # new level means in templates/zerver/api/changelog.md, as well as 35 # "**Changes**" entries in the endpoint's documentation in `zulip.yaml`. 36 API_FEATURE_LEVEL = 115 37 38 # Bump the minor PROVISION_VERSION to indicate that folks should provision 39 # only when going from an old version of the code to a newer version. Bump 40 # the major version to indicate that folks should provision in both 41 # directions. 42 43 # Typically, 44 # * adding a dependency only requires a minor version bump; 45 # * removing a dependency requires a major version bump; 46 # * upgrading a dependency requires a major version bump, unless the 47 # upgraded dependency is backwards compatible with all of our 48 # historical commits sharing the same major version, in which case a 49 # minor version bump suffices. 50 51 PROVISION_VERSION = "173.3" 52 [end of version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/version.py b/version.py --- a/version.py +++ b/version.py @@ -48,4 +48,4 @@ # historical commits sharing the same major version, in which case a # minor version bump suffices. -PROVISION_VERSION = "173.3" +PROVISION_VERSION = "173.4"
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -48,4 +48,4 @@\n # historical commits sharing the same major version, in which case a\n # minor version bump suffices.\n \n-PROVISION_VERSION = \"173.3\"\n+PROVISION_VERSION = \"173.4\"\n", "issue": "\"Pan and zoom\" cuts off images instead of using the available space\nIf you have a tall image and a wide monitor (and wide browser viewport), and you try to zoom\u2026 the image stays trapped inside the same box it occupied before you even tried to zoom. If the image is super wide instead of tall, the same thing happens the other way around.\r\n\r\nThis leads to a lot of frustrating panning around, to look at the different parts of the image through this narrow keyhole, while tons of screen space next to it doesn't get used.\r\n\r\nThis is the biggest of the issues described by @vanclute in #18939. It was reported again by @alexanderglueck as #19837, and I just ran into it myself ([chat](https://chat.zulip.org/#narrow/stream/6-frontend/topic/pan.2Fzoom/near/1308717)). Here's a nice illustration from #19837: \r\n![image](https://user-images.githubusercontent.com/28173/149076010-0776088a-53ef-4e10-97b0-6d621692b9e2.png)\r\n\r\nInstead, when zooming we should use the full space available. This may be bigger than the area the image occupied when it was scaled down to fit completely in the space available, because the available box may have a different aspect ratio from the image.\r\n\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"5.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = f.readlines() + [\"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"4.0\"\nLATEST_RELEASE_VERSION = \"4.9\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2021/05/13/zulip-4-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.2.0\"\nDESKTOP_WARNING_VERSION = \"5.4.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in templates/zerver/api/changelog.md, as well as\n# \"**Changes**\" entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 115\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = \"173.3\"\n", "path": "version.py"}]}
1,498
81
gh_patches_debug_7854
rasdani/github-patches
git_diff
coala__coala-4969
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add docstring for coala_modes.py mode_normal This function should have a proper docstring, and indicate that `log_printer` is unused & deprecated. </issue> <code> [start of coalib/coala_modes.py] 1 def mode_normal(console_printer, log_printer, args, debug=False): 2 import functools 3 4 from coalib.coala_main import run_coala 5 from coalib.output.ConsoleInteraction import ( 6 acquire_settings, nothing_done, 7 print_results, print_section_beginning) 8 9 partial_print_sec_beg = functools.partial( 10 print_section_beginning, 11 console_printer) 12 results, exitcode, _ = run_coala( 13 print_results=print_results, 14 acquire_settings=acquire_settings, 15 print_section_beginning=partial_print_sec_beg, 16 nothing_done=nothing_done, 17 console_printer=console_printer, 18 args=args, 19 debug=debug) 20 21 return exitcode 22 23 24 def mode_non_interactive(console_printer, args, debug=False): 25 import functools 26 27 from coalib.coala_main import run_coala 28 from coalib.output.ConsoleInteraction import ( 29 print_results_no_input, print_section_beginning) 30 31 partial_print_sec_beg = functools.partial( 32 print_section_beginning, 33 console_printer) 34 results, exitcode, _ = run_coala( 35 print_results=print_results_no_input, 36 print_section_beginning=partial_print_sec_beg, 37 force_show_patch=True, 38 console_printer=console_printer, 39 args=args, 40 debug=debug) 41 42 return exitcode 43 44 45 def mode_json(args, debug=False): 46 import json 47 48 from coalib.coala_main import run_coala 49 from coalib.output.Logging import configure_json_logging 50 from coalib.output.JSONEncoder import create_json_encoder 51 52 if args.log_json: 53 log_stream = configure_json_logging() 54 55 JSONEncoder = create_json_encoder(use_relpath=args.relpath) 56 57 results, exitcode, _ = run_coala(args=args, debug=debug) 58 59 retval = {'results': results} 60 61 if args.log_json: 62 retval['logs'] = [json.loads(line) for line in 63 log_stream.getvalue().splitlines()] 64 65 if args.output: 66 filename = str(args.output[0]) 67 with open(filename, 'w') as fp: 68 json.dump(retval, fp, 69 cls=JSONEncoder, 70 sort_keys=True, 71 indent=2, 72 separators=(',', ': ')) 73 else: 74 print(json.dumps(retval, 75 cls=JSONEncoder, 76 sort_keys=True, 77 indent=2, 78 separators=(',', ': '))) 79 80 return 0 if args.show_bears else exitcode 81 82 83 def mode_format(args, debug=False): 84 from coalib.coala_main import run_coala 85 from coalib.output.ConsoleInteraction import print_results_formatted 86 87 _, exitcode, _ = run_coala( 88 print_results=print_results_formatted, args=args, debug=debug) 89 return exitcode 90 [end of coalib/coala_modes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/coalib/coala_modes.py b/coalib/coala_modes.py --- a/coalib/coala_modes.py +++ b/coalib/coala_modes.py @@ -1,4 +1,13 @@ def mode_normal(console_printer, log_printer, args, debug=False): + """ + This is the default coala mode. User interaction is allowed in this mode. + + :param console_printer: Object to print messages on the console. + :param log_printer: Deprecated. + :param args: Alternative pre-parsed CLI arguments. + :param debug: Run in debug mode, bypassing multiprocessing, + and not catching any exceptions. + """ import functools from coalib.coala_main import run_coala
{"golden_diff": "diff --git a/coalib/coala_modes.py b/coalib/coala_modes.py\n--- a/coalib/coala_modes.py\n+++ b/coalib/coala_modes.py\n@@ -1,4 +1,13 @@\n def mode_normal(console_printer, log_printer, args, debug=False):\n+ \"\"\"\n+ This is the default coala mode. User interaction is allowed in this mode.\n+\n+ :param console_printer: Object to print messages on the console.\n+ :param log_printer: Deprecated.\n+ :param args: Alternative pre-parsed CLI arguments.\n+ :param debug: Run in debug mode, bypassing multiprocessing,\n+ and not catching any exceptions.\n+ \"\"\"\n import functools\n \n from coalib.coala_main import run_coala\n", "issue": "Add docstring for coala_modes.py mode_normal\nThis function should have a proper docstring, and indicate that `log_printer` is unused & deprecated.\n", "before_files": [{"content": "def mode_normal(console_printer, log_printer, args, debug=False):\n import functools\n\n from coalib.coala_main import run_coala\n from coalib.output.ConsoleInteraction import (\n acquire_settings, nothing_done,\n print_results, print_section_beginning)\n\n partial_print_sec_beg = functools.partial(\n print_section_beginning,\n console_printer)\n results, exitcode, _ = run_coala(\n print_results=print_results,\n acquire_settings=acquire_settings,\n print_section_beginning=partial_print_sec_beg,\n nothing_done=nothing_done,\n console_printer=console_printer,\n args=args,\n debug=debug)\n\n return exitcode\n\n\ndef mode_non_interactive(console_printer, args, debug=False):\n import functools\n\n from coalib.coala_main import run_coala\n from coalib.output.ConsoleInteraction import (\n print_results_no_input, print_section_beginning)\n\n partial_print_sec_beg = functools.partial(\n print_section_beginning,\n console_printer)\n results, exitcode, _ = run_coala(\n print_results=print_results_no_input,\n print_section_beginning=partial_print_sec_beg,\n force_show_patch=True,\n console_printer=console_printer,\n args=args,\n debug=debug)\n\n return exitcode\n\n\ndef mode_json(args, debug=False):\n import json\n\n from coalib.coala_main import run_coala\n from coalib.output.Logging import configure_json_logging\n from coalib.output.JSONEncoder import create_json_encoder\n\n if args.log_json:\n log_stream = configure_json_logging()\n\n JSONEncoder = create_json_encoder(use_relpath=args.relpath)\n\n results, exitcode, _ = run_coala(args=args, debug=debug)\n\n retval = {'results': results}\n\n if args.log_json:\n retval['logs'] = [json.loads(line) for line in\n log_stream.getvalue().splitlines()]\n\n if args.output:\n filename = str(args.output[0])\n with open(filename, 'w') as fp:\n json.dump(retval, fp,\n cls=JSONEncoder,\n sort_keys=True,\n indent=2,\n separators=(',', ': '))\n else:\n print(json.dumps(retval,\n cls=JSONEncoder,\n sort_keys=True,\n indent=2,\n separators=(',', ': ')))\n\n return 0 if args.show_bears else exitcode\n\n\ndef mode_format(args, debug=False):\n from coalib.coala_main import run_coala\n from coalib.output.ConsoleInteraction import print_results_formatted\n\n _, exitcode, _ = run_coala(\n print_results=print_results_formatted, args=args, debug=debug)\n return exitcode\n", "path": "coalib/coala_modes.py"}]}
1,321
170
gh_patches_debug_26041
rasdani/github-patches
git_diff
comic__grand-challenge.org-2410
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a time column to challenge requests table **Is your feature request related to a problem? Please describe.** Its not clear right away when each challenge request was submitted. **Describe the solution you'd like** I would like to add a column that shows the time when challenge request was submitted. It would be handy. **Describe alternatives you've considered** Looking at a specific challenge request page to see the time. **Additional context** Idea shown on the picture below. Color coding is not necessary, but could be nice. ![requesttime](https://user-images.githubusercontent.com/37930396/164007865-b92bb7c9-dc2e-461d-a9d9-857779ed932f.png) </issue> <code> [start of app/grandchallenge/challenges/admin.py] 1 from django.contrib import admin, messages 2 from django.contrib.admin import ModelAdmin 3 from django.core.exceptions import ValidationError 4 5 from grandchallenge.challenges.emails import send_challenge_status_update_email 6 from grandchallenge.challenges.models import ( 7 Challenge, 8 ChallengeRequest, 9 ChallengeSeries, 10 ExternalChallenge, 11 ) 12 13 14 class ChallengeAdmin(ModelAdmin): 15 readonly_fields = ("creator",) 16 autocomplete_fields = ("publications",) 17 ordering = ("-created",) 18 list_display = ("short_name", "created") 19 search_fields = ("short_name",) 20 21 22 class ExternalChallengeAdmin(ModelAdmin): 23 readonly_fields = ("creator",) 24 autocomplete_fields = ("publications",) 25 26 27 class ChallengeRequestAdmin(ModelAdmin): 28 readonly_fields = ("creator",) 29 ordering = ("-created",) 30 list_display = ("title", "short_name", "creator", "created", "status") 31 actions = ["create_challenge", "send_status_update_email"] 32 33 @admin.action(description="Create challenge for this request") 34 def create_challenge(self, request, queryset): 35 for challengerequest in queryset: 36 try: 37 challengerequest.create_challenge() 38 except ValidationError: 39 self.message_user( 40 request, 41 f"There already is a challenge with short " 42 f"name: {challengerequest.short_name}", 43 messages.WARNING, 44 ) 45 46 @admin.action(description="Send status update email to requester") 47 def send_status_update_email(self, request, queryset): 48 for challengerequest in queryset: 49 if ( 50 challengerequest.status 51 == challengerequest.ChallengeRequestStatusChoices.ACCEPTED 52 ): 53 try: 54 challenge = Challenge.objects.get( 55 short_name=challengerequest.short_name 56 ) 57 except Challenge.DoesNotExist: 58 challenge = challengerequest.create_challenge() 59 else: 60 challenge = None 61 send_challenge_status_update_email( 62 challengerequest=challengerequest, challenge=challenge 63 ) 64 65 66 admin.site.register(Challenge, ChallengeAdmin) 67 admin.site.register(ExternalChallenge, ExternalChallengeAdmin) 68 admin.site.register(ChallengeSeries) 69 admin.site.register(ChallengeRequest, ChallengeRequestAdmin) 70 [end of app/grandchallenge/challenges/admin.py] [start of app/grandchallenge/core/templatetags/naturaldelta.py] 1 import humanize 2 from django import template 3 4 register = template.Library() 5 6 7 @register.filter 8 def naturaldelta(value): 9 return humanize.naturaldelta(value, months=False) 10 [end of app/grandchallenge/core/templatetags/naturaldelta.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/challenges/admin.py b/app/grandchallenge/challenges/admin.py --- a/app/grandchallenge/challenges/admin.py +++ b/app/grandchallenge/challenges/admin.py @@ -27,8 +27,22 @@ class ChallengeRequestAdmin(ModelAdmin): readonly_fields = ("creator",) ordering = ("-created",) - list_display = ("title", "short_name", "creator", "created", "status") + list_display = ( + "title", + "short_name", + "creator", + "created", + "status", + "challenge_type", + "total_cost", + "budget_for_hosting_challenge", + ) actions = ["create_challenge", "send_status_update_email"] + list_filter = ["status", "challenge_type"] + + @admin.display(description="Total cost") + def total_cost(self, obj): + return "%s" % (obj.budget["Total"]) @admin.action(description="Create challenge for this request") def create_challenge(self, request, queryset): diff --git a/app/grandchallenge/core/templatetags/naturaldelta.py b/app/grandchallenge/core/templatetags/naturaldelta.py --- a/app/grandchallenge/core/templatetags/naturaldelta.py +++ b/app/grandchallenge/core/templatetags/naturaldelta.py @@ -1,5 +1,6 @@ import humanize from django import template +from django.utils import timezone register = template.Library() @@ -7,3 +8,8 @@ @register.filter def naturaldelta(value): return humanize.naturaldelta(value, months=False) + + [email protected] +def timedifference(value): + return (timezone.now() - value).days
{"golden_diff": "diff --git a/app/grandchallenge/challenges/admin.py b/app/grandchallenge/challenges/admin.py\n--- a/app/grandchallenge/challenges/admin.py\n+++ b/app/grandchallenge/challenges/admin.py\n@@ -27,8 +27,22 @@\n class ChallengeRequestAdmin(ModelAdmin):\n readonly_fields = (\"creator\",)\n ordering = (\"-created\",)\n- list_display = (\"title\", \"short_name\", \"creator\", \"created\", \"status\")\n+ list_display = (\n+ \"title\",\n+ \"short_name\",\n+ \"creator\",\n+ \"created\",\n+ \"status\",\n+ \"challenge_type\",\n+ \"total_cost\",\n+ \"budget_for_hosting_challenge\",\n+ )\n actions = [\"create_challenge\", \"send_status_update_email\"]\n+ list_filter = [\"status\", \"challenge_type\"]\n+\n+ @admin.display(description=\"Total cost\")\n+ def total_cost(self, obj):\n+ return \"%s\" % (obj.budget[\"Total\"])\n \n @admin.action(description=\"Create challenge for this request\")\n def create_challenge(self, request, queryset):\ndiff --git a/app/grandchallenge/core/templatetags/naturaldelta.py b/app/grandchallenge/core/templatetags/naturaldelta.py\n--- a/app/grandchallenge/core/templatetags/naturaldelta.py\n+++ b/app/grandchallenge/core/templatetags/naturaldelta.py\n@@ -1,5 +1,6 @@\n import humanize\n from django import template\n+from django.utils import timezone\n \n register = template.Library()\n \n@@ -7,3 +8,8 @@\n @register.filter\n def naturaldelta(value):\n return humanize.naturaldelta(value, months=False)\n+\n+\[email protected]\n+def timedifference(value):\n+ return (timezone.now() - value).days\n", "issue": "Add a time column to challenge requests table\n**Is your feature request related to a problem? Please describe.**\r\nIts not clear right away when each challenge request was submitted.\r\n\r\n\r\n**Describe the solution you'd like**\r\nI would like to add a column that shows the time when challenge request was submitted. It would be handy. \r\n\r\n**Describe alternatives you've considered**\r\nLooking at a specific challenge request page to see the time.\r\n\r\n**Additional context**\r\nIdea shown on the picture below. Color coding is not necessary, but could be nice.\r\n![requesttime](https://user-images.githubusercontent.com/37930396/164007865-b92bb7c9-dc2e-461d-a9d9-857779ed932f.png)\n", "before_files": [{"content": "from django.contrib import admin, messages\nfrom django.contrib.admin import ModelAdmin\nfrom django.core.exceptions import ValidationError\n\nfrom grandchallenge.challenges.emails import send_challenge_status_update_email\nfrom grandchallenge.challenges.models import (\n Challenge,\n ChallengeRequest,\n ChallengeSeries,\n ExternalChallenge,\n)\n\n\nclass ChallengeAdmin(ModelAdmin):\n readonly_fields = (\"creator\",)\n autocomplete_fields = (\"publications\",)\n ordering = (\"-created\",)\n list_display = (\"short_name\", \"created\")\n search_fields = (\"short_name\",)\n\n\nclass ExternalChallengeAdmin(ModelAdmin):\n readonly_fields = (\"creator\",)\n autocomplete_fields = (\"publications\",)\n\n\nclass ChallengeRequestAdmin(ModelAdmin):\n readonly_fields = (\"creator\",)\n ordering = (\"-created\",)\n list_display = (\"title\", \"short_name\", \"creator\", \"created\", \"status\")\n actions = [\"create_challenge\", \"send_status_update_email\"]\n\n @admin.action(description=\"Create challenge for this request\")\n def create_challenge(self, request, queryset):\n for challengerequest in queryset:\n try:\n challengerequest.create_challenge()\n except ValidationError:\n self.message_user(\n request,\n f\"There already is a challenge with short \"\n f\"name: {challengerequest.short_name}\",\n messages.WARNING,\n )\n\n @admin.action(description=\"Send status update email to requester\")\n def send_status_update_email(self, request, queryset):\n for challengerequest in queryset:\n if (\n challengerequest.status\n == challengerequest.ChallengeRequestStatusChoices.ACCEPTED\n ):\n try:\n challenge = Challenge.objects.get(\n short_name=challengerequest.short_name\n )\n except Challenge.DoesNotExist:\n challenge = challengerequest.create_challenge()\n else:\n challenge = None\n send_challenge_status_update_email(\n challengerequest=challengerequest, challenge=challenge\n )\n\n\nadmin.site.register(Challenge, ChallengeAdmin)\nadmin.site.register(ExternalChallenge, ExternalChallengeAdmin)\nadmin.site.register(ChallengeSeries)\nadmin.site.register(ChallengeRequest, ChallengeRequestAdmin)\n", "path": "app/grandchallenge/challenges/admin.py"}, {"content": "import humanize\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]\ndef naturaldelta(value):\n return humanize.naturaldelta(value, months=False)\n", "path": "app/grandchallenge/core/templatetags/naturaldelta.py"}]}
1,374
395
gh_patches_debug_32324
rasdani/github-patches
git_diff
modal-labs__modal-examples-695
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> remove duplicate webscraper example once #669 is merged </issue> <code> [start of 10_integrations/webscraper.py] 1 # --- 2 # runtimes: ["runc", "gvisor"] 3 # --- 4 import os 5 6 import modal 7 8 stub = modal.Stub("example-linkscraper") 9 10 11 playwright_image = modal.Image.debian_slim( 12 python_version="3.10" 13 ).run_commands( # Doesn't work with 3.11 yet 14 "apt-get update", 15 "apt-get install -y software-properties-common", 16 "apt-add-repository non-free", 17 "apt-add-repository contrib", 18 "pip install playwright==1.30.0", 19 "playwright install-deps chromium", 20 "playwright install chromium", 21 ) 22 23 24 @stub.function(image=playwright_image) 25 async def get_links(url: str) -> set[str]: 26 from playwright.async_api import async_playwright 27 28 async with async_playwright() as p: 29 browser = await p.chromium.launch() 30 page = await browser.new_page() 31 await page.goto(url) 32 links = await page.eval_on_selector_all( 33 "a[href]", "elements => elements.map(element => element.href)" 34 ) 35 await browser.close() 36 37 return set(links) 38 39 40 slack_sdk_image = modal.Image.debian_slim().pip_install("slack-sdk") 41 42 43 @stub.function( 44 image=slack_sdk_image, 45 secrets=[modal.Secret.from_name("scraper-slack-secret")], 46 ) 47 def bot_token_msg(channel, message): 48 import slack_sdk 49 50 print(f"Posting {message} to #{channel}") 51 client = slack_sdk.WebClient(token=os.environ["SLACK_BOT_TOKEN"]) 52 client.chat_postMessage(channel=channel, text=message) 53 54 55 @stub.function() 56 def scrape(): 57 links_of_interest = ["http://modal.com"] 58 59 for links in get_links.map(links_of_interest): 60 for link in links: 61 bot_token_msg.remote("scraped-links", link) 62 63 64 @stub.function(schedule=modal.Period(days=1)) 65 def daily_scrape(): 66 scrape.remote() 67 68 69 @stub.local_entrypoint() 70 def run(): 71 scrape.remote() 72 [end of 10_integrations/webscraper.py] [start of misc/webscraper.py] 1 # --- 2 # runtimes: ["runc", "gvisor"] 3 # --- 4 import os 5 6 import modal 7 8 stub = modal.Stub("example-linkscraper") 9 10 11 playwright_image = modal.Image.debian_slim( 12 python_version="3.10" 13 ).run_commands( # Doesn't work with 3.11 yet 14 "apt-get update", 15 "apt-get install -y software-properties-common", 16 "apt-add-repository non-free", 17 "apt-add-repository contrib", 18 "pip install playwright==1.30.0", 19 "playwright install-deps chromium", 20 "playwright install chromium", 21 ) 22 23 24 @stub.function(image=playwright_image) 25 async def get_links(url: str) -> set[str]: 26 from playwright.async_api import async_playwright 27 28 async with async_playwright() as p: 29 browser = await p.chromium.launch() 30 page = await browser.new_page() 31 await page.goto(url) 32 links = await page.eval_on_selector_all( 33 "a[href]", "elements => elements.map(element => element.href)" 34 ) 35 await browser.close() 36 37 return set(links) 38 39 40 slack_sdk_image = modal.Image.debian_slim().pip_install("slack-sdk") 41 42 43 @stub.function( 44 image=slack_sdk_image, 45 secrets=[modal.Secret.from_name("scraper-slack-secret")], 46 ) 47 def bot_token_msg(channel, message): 48 import slack_sdk 49 50 print(f"Posting {message} to #{channel}") 51 client = slack_sdk.WebClient(token=os.environ["SLACK_BOT_TOKEN"]) 52 client.chat_postMessage(channel=channel, text=message) 53 54 55 @stub.function() 56 def scrape(): 57 links_of_interest = ["http://modal.com"] 58 59 for links in get_links.map(links_of_interest): 60 for link in links: 61 bot_token_msg.remote("scraped-links", link) 62 63 64 @stub.function(schedule=modal.Period(days=1)) 65 def daily_scrape(): 66 scrape.remote() 67 68 69 @stub.local_entrypoint() 70 def run(): 71 scrape.remote() 72 [end of misc/webscraper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/10_integrations/webscraper.py b/10_integrations/webscraper.py --- a/10_integrations/webscraper.py +++ b/10_integrations/webscraper.py @@ -15,7 +15,7 @@ "apt-get install -y software-properties-common", "apt-add-repository non-free", "apt-add-repository contrib", - "pip install playwright==1.30.0", + "pip install playwright==1.42.0", "playwright install-deps chromium", "playwright install chromium", ) diff --git a/misc/webscraper.py b/misc/webscraper.py deleted file mode 100644 --- a/misc/webscraper.py +++ /dev/null @@ -1,71 +0,0 @@ -# --- -# runtimes: ["runc", "gvisor"] -# --- -import os - -import modal - -stub = modal.Stub("example-linkscraper") - - -playwright_image = modal.Image.debian_slim( - python_version="3.10" -).run_commands( # Doesn't work with 3.11 yet - "apt-get update", - "apt-get install -y software-properties-common", - "apt-add-repository non-free", - "apt-add-repository contrib", - "pip install playwright==1.30.0", - "playwright install-deps chromium", - "playwright install chromium", -) - - [email protected](image=playwright_image) -async def get_links(url: str) -> set[str]: - from playwright.async_api import async_playwright - - async with async_playwright() as p: - browser = await p.chromium.launch() - page = await browser.new_page() - await page.goto(url) - links = await page.eval_on_selector_all( - "a[href]", "elements => elements.map(element => element.href)" - ) - await browser.close() - - return set(links) - - -slack_sdk_image = modal.Image.debian_slim().pip_install("slack-sdk") - - [email protected]( - image=slack_sdk_image, - secrets=[modal.Secret.from_name("scraper-slack-secret")], -) -def bot_token_msg(channel, message): - import slack_sdk - - print(f"Posting {message} to #{channel}") - client = slack_sdk.WebClient(token=os.environ["SLACK_BOT_TOKEN"]) - client.chat_postMessage(channel=channel, text=message) - - [email protected]() -def scrape(): - links_of_interest = ["http://modal.com"] - - for links in get_links.map(links_of_interest): - for link in links: - bot_token_msg.remote("scraped-links", link) - - [email protected](schedule=modal.Period(days=1)) -def daily_scrape(): - scrape.remote() - - [email protected]_entrypoint() -def run(): - scrape.remote()
{"golden_diff": "diff --git a/10_integrations/webscraper.py b/10_integrations/webscraper.py\n--- a/10_integrations/webscraper.py\n+++ b/10_integrations/webscraper.py\n@@ -15,7 +15,7 @@\n \"apt-get install -y software-properties-common\",\n \"apt-add-repository non-free\",\n \"apt-add-repository contrib\",\n- \"pip install playwright==1.30.0\",\n+ \"pip install playwright==1.42.0\",\n \"playwright install-deps chromium\",\n \"playwright install chromium\",\n )\ndiff --git a/misc/webscraper.py b/misc/webscraper.py\ndeleted file mode 100644\n--- a/misc/webscraper.py\n+++ /dev/null\n@@ -1,71 +0,0 @@\n-# ---\n-# runtimes: [\"runc\", \"gvisor\"]\n-# ---\n-import os\n-\n-import modal\n-\n-stub = modal.Stub(\"example-linkscraper\")\n-\n-\n-playwright_image = modal.Image.debian_slim(\n- python_version=\"3.10\"\n-).run_commands( # Doesn't work with 3.11 yet\n- \"apt-get update\",\n- \"apt-get install -y software-properties-common\",\n- \"apt-add-repository non-free\",\n- \"apt-add-repository contrib\",\n- \"pip install playwright==1.30.0\",\n- \"playwright install-deps chromium\",\n- \"playwright install chromium\",\n-)\n-\n-\[email protected](image=playwright_image)\n-async def get_links(url: str) -> set[str]:\n- from playwright.async_api import async_playwright\n-\n- async with async_playwright() as p:\n- browser = await p.chromium.launch()\n- page = await browser.new_page()\n- await page.goto(url)\n- links = await page.eval_on_selector_all(\n- \"a[href]\", \"elements => elements.map(element => element.href)\"\n- )\n- await browser.close()\n-\n- return set(links)\n-\n-\n-slack_sdk_image = modal.Image.debian_slim().pip_install(\"slack-sdk\")\n-\n-\[email protected](\n- image=slack_sdk_image,\n- secrets=[modal.Secret.from_name(\"scraper-slack-secret\")],\n-)\n-def bot_token_msg(channel, message):\n- import slack_sdk\n-\n- print(f\"Posting {message} to #{channel}\")\n- client = slack_sdk.WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n- client.chat_postMessage(channel=channel, text=message)\n-\n-\[email protected]()\n-def scrape():\n- links_of_interest = [\"http://modal.com\"]\n-\n- for links in get_links.map(links_of_interest):\n- for link in links:\n- bot_token_msg.remote(\"scraped-links\", link)\n-\n-\[email protected](schedule=modal.Period(days=1))\n-def daily_scrape():\n- scrape.remote()\n-\n-\[email protected]_entrypoint()\n-def run():\n- scrape.remote()\n", "issue": "remove duplicate webscraper example once #669 is merged\n\n", "before_files": [{"content": "# ---\n# runtimes: [\"runc\", \"gvisor\"]\n# ---\nimport os\n\nimport modal\n\nstub = modal.Stub(\"example-linkscraper\")\n\n\nplaywright_image = modal.Image.debian_slim(\n python_version=\"3.10\"\n).run_commands( # Doesn't work with 3.11 yet\n \"apt-get update\",\n \"apt-get install -y software-properties-common\",\n \"apt-add-repository non-free\",\n \"apt-add-repository contrib\",\n \"pip install playwright==1.30.0\",\n \"playwright install-deps chromium\",\n \"playwright install chromium\",\n)\n\n\[email protected](image=playwright_image)\nasync def get_links(url: str) -> set[str]:\n from playwright.async_api import async_playwright\n\n async with async_playwright() as p:\n browser = await p.chromium.launch()\n page = await browser.new_page()\n await page.goto(url)\n links = await page.eval_on_selector_all(\n \"a[href]\", \"elements => elements.map(element => element.href)\"\n )\n await browser.close()\n\n return set(links)\n\n\nslack_sdk_image = modal.Image.debian_slim().pip_install(\"slack-sdk\")\n\n\[email protected](\n image=slack_sdk_image,\n secrets=[modal.Secret.from_name(\"scraper-slack-secret\")],\n)\ndef bot_token_msg(channel, message):\n import slack_sdk\n\n print(f\"Posting {message} to #{channel}\")\n client = slack_sdk.WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n client.chat_postMessage(channel=channel, text=message)\n\n\[email protected]()\ndef scrape():\n links_of_interest = [\"http://modal.com\"]\n\n for links in get_links.map(links_of_interest):\n for link in links:\n bot_token_msg.remote(\"scraped-links\", link)\n\n\[email protected](schedule=modal.Period(days=1))\ndef daily_scrape():\n scrape.remote()\n\n\[email protected]_entrypoint()\ndef run():\n scrape.remote()\n", "path": "10_integrations/webscraper.py"}, {"content": "# ---\n# runtimes: [\"runc\", \"gvisor\"]\n# ---\nimport os\n\nimport modal\n\nstub = modal.Stub(\"example-linkscraper\")\n\n\nplaywright_image = modal.Image.debian_slim(\n python_version=\"3.10\"\n).run_commands( # Doesn't work with 3.11 yet\n \"apt-get update\",\n \"apt-get install -y software-properties-common\",\n \"apt-add-repository non-free\",\n \"apt-add-repository contrib\",\n \"pip install playwright==1.30.0\",\n \"playwright install-deps chromium\",\n \"playwright install chromium\",\n)\n\n\[email protected](image=playwright_image)\nasync def get_links(url: str) -> set[str]:\n from playwright.async_api import async_playwright\n\n async with async_playwright() as p:\n browser = await p.chromium.launch()\n page = await browser.new_page()\n await page.goto(url)\n links = await page.eval_on_selector_all(\n \"a[href]\", \"elements => elements.map(element => element.href)\"\n )\n await browser.close()\n\n return set(links)\n\n\nslack_sdk_image = modal.Image.debian_slim().pip_install(\"slack-sdk\")\n\n\[email protected](\n image=slack_sdk_image,\n secrets=[modal.Secret.from_name(\"scraper-slack-secret\")],\n)\ndef bot_token_msg(channel, message):\n import slack_sdk\n\n print(f\"Posting {message} to #{channel}\")\n client = slack_sdk.WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n client.chat_postMessage(channel=channel, text=message)\n\n\[email protected]()\ndef scrape():\n links_of_interest = [\"http://modal.com\"]\n\n for links in get_links.map(links_of_interest):\n for link in links:\n bot_token_msg.remote(\"scraped-links\", link)\n\n\[email protected](schedule=modal.Period(days=1))\ndef daily_scrape():\n scrape.remote()\n\n\[email protected]_entrypoint()\ndef run():\n scrape.remote()\n", "path": "misc/webscraper.py"}]}
1,710
666
gh_patches_debug_13891
rasdani/github-patches
git_diff
Mailu__Mailu-1862
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Radicale password check fails (length limited?) So far, the webdav radicale is protected with basic auth (apparently htaccess or similar). If user password > 8 then it fails to connect. we should remove this limitation to let use stong passwords Radicale password check fails (length limited?) So far, the webdav radicale is protected with basic auth (apparently htaccess or similar). If user password > 8 then it fails to connect. we should remove this limitation to let use stong passwords </issue> <code> [start of core/admin/mailu/internal/views/auth.py] 1 from mailu import models, utils 2 from mailu.internal import internal, nginx 3 from flask import current_app as app 4 5 import flask 6 import flask_login 7 import base64 8 import ipaddress 9 10 11 @internal.route("/auth/email") 12 def nginx_authentication(): 13 """ Main authentication endpoint for Nginx email server 14 """ 15 limiter = utils.limiter.get_limiter(app.config["AUTH_RATELIMIT"], "auth-ip") 16 client_ip = flask.request.headers["Client-Ip"] 17 if not limiter.test(client_ip): 18 response = flask.Response() 19 response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded' 20 response.headers['Auth-Error-Code'] = '451 4.3.2' 21 if int(flask.request.headers['Auth-Login-Attempt']) < 10: 22 response.headers['Auth-Wait'] = '3' 23 return response 24 headers = nginx.handle_authentication(flask.request.headers) 25 response = flask.Response() 26 for key, value in headers.items(): 27 response.headers[key] = str(value) 28 if ("Auth-Status" not in headers) or (headers["Auth-Status"] != "OK"): 29 limit_subnet = str(app.config["AUTH_RATELIMIT_SUBNET"]) != 'False' 30 subnet = ipaddress.ip_network(app.config["SUBNET"]) 31 if limit_subnet or ipaddress.ip_address(client_ip) not in subnet: 32 limiter.hit(flask.request.headers["Client-Ip"]) 33 return response 34 35 36 @internal.route("/auth/admin") 37 def admin_authentication(): 38 """ Fails if the user is not an authenticated admin. 39 """ 40 if (not flask_login.current_user.is_anonymous 41 and flask_login.current_user.global_admin 42 and flask_login.current_user.enabled): 43 return "" 44 return flask.abort(403) 45 46 @internal.route("/auth/user") 47 def user_authentication(): 48 """ Fails if the user is not authenticated. 49 """ 50 if (not flask_login.current_user.is_anonymous 51 and flask_login.current_user.enabled): 52 response = flask.Response() 53 response.headers["X-User"] = flask_login.current_user.get_id() 54 response.headers["X-User-Token"] = models.User.get_temp_token(flask_login.current_user.get_id()) 55 return response 56 return flask.abort(403) 57 58 59 @internal.route("/auth/basic") 60 def basic_authentication(): 61 """ Tries to authenticate using the Authorization header. 62 """ 63 authorization = flask.request.headers.get("Authorization") 64 if authorization and authorization.startswith("Basic "): 65 encoded = authorization.replace("Basic ", "") 66 user_email, password = base64.b64decode(encoded).split(b":") 67 user = models.User.query.get(user_email.decode("utf8")) 68 if nginx.check_credentials(user, password.decode('utf-8'), flask.request.remote_addr, "web"): 69 response = flask.Response() 70 response.headers["X-User"] = user.email 71 return response 72 response = flask.Response(status=401) 73 response.headers["WWW-Authenticate"] = 'Basic realm="Login Required"' 74 return response 75 [end of core/admin/mailu/internal/views/auth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py --- a/core/admin/mailu/internal/views/auth.py +++ b/core/admin/mailu/internal/views/auth.py @@ -63,7 +63,7 @@ authorization = flask.request.headers.get("Authorization") if authorization and authorization.startswith("Basic "): encoded = authorization.replace("Basic ", "") - user_email, password = base64.b64decode(encoded).split(b":") + user_email, password = base64.b64decode(encoded).split(b":", 1) user = models.User.query.get(user_email.decode("utf8")) if nginx.check_credentials(user, password.decode('utf-8'), flask.request.remote_addr, "web"): response = flask.Response()
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py\n--- a/core/admin/mailu/internal/views/auth.py\n+++ b/core/admin/mailu/internal/views/auth.py\n@@ -63,7 +63,7 @@\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n- user_email, password = base64.b64decode(encoded).split(b\":\")\n+ user_email, password = base64.b64decode(encoded).split(b\":\", 1)\n user = models.User.query.get(user_email.decode(\"utf8\"))\n if nginx.check_credentials(user, password.decode('utf-8'), flask.request.remote_addr, \"web\"):\n response = flask.Response()\n", "issue": "Radicale password check fails (length limited?)\nSo far, the webdav radicale is protected with basic auth (apparently htaccess or similar).\r\nIf user password > 8 then it fails to connect. \r\nwe should remove this limitation to let use stong passwords\nRadicale password check fails (length limited?)\nSo far, the webdav radicale is protected with basic auth (apparently htaccess or similar).\r\nIf user password > 8 then it fails to connect. \r\nwe should remove this limitation to let use stong passwords\n", "before_files": [{"content": "from mailu import models, utils\nfrom mailu.internal import internal, nginx\nfrom flask import current_app as app\n\nimport flask\nimport flask_login\nimport base64\nimport ipaddress\n\n\[email protected](\"/auth/email\")\ndef nginx_authentication():\n \"\"\" Main authentication endpoint for Nginx email server\n \"\"\"\n limiter = utils.limiter.get_limiter(app.config[\"AUTH_RATELIMIT\"], \"auth-ip\")\n client_ip = flask.request.headers[\"Client-Ip\"]\n if not limiter.test(client_ip):\n response = flask.Response()\n response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'\n response.headers['Auth-Error-Code'] = '451 4.3.2'\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n headers = nginx.handle_authentication(flask.request.headers)\n response = flask.Response()\n for key, value in headers.items():\n response.headers[key] = str(value)\n if (\"Auth-Status\" not in headers) or (headers[\"Auth-Status\"] != \"OK\"):\n limit_subnet = str(app.config[\"AUTH_RATELIMIT_SUBNET\"]) != 'False'\n subnet = ipaddress.ip_network(app.config[\"SUBNET\"])\n if limit_subnet or ipaddress.ip_address(client_ip) not in subnet:\n limiter.hit(flask.request.headers[\"Client-Ip\"])\n return response\n\n\[email protected](\"/auth/admin\")\ndef admin_authentication():\n \"\"\" Fails if the user is not an authenticated admin.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.global_admin\n and flask_login.current_user.enabled):\n return \"\"\n return flask.abort(403)\n\[email protected](\"/auth/user\")\ndef user_authentication():\n \"\"\" Fails if the user is not authenticated.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.enabled):\n response = flask.Response()\n response.headers[\"X-User\"] = flask_login.current_user.get_id()\n response.headers[\"X-User-Token\"] = models.User.get_temp_token(flask_login.current_user.get_id())\n return response\n return flask.abort(403)\n\n\[email protected](\"/auth/basic\")\ndef basic_authentication():\n \"\"\" Tries to authenticate using the Authorization header.\n \"\"\"\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n user_email, password = base64.b64decode(encoded).split(b\":\")\n user = models.User.query.get(user_email.decode(\"utf8\"))\n if nginx.check_credentials(user, password.decode('utf-8'), flask.request.remote_addr, \"web\"):\n response = flask.Response()\n response.headers[\"X-User\"] = user.email\n return response\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Login Required\"'\n return response\n", "path": "core/admin/mailu/internal/views/auth.py"}]}
1,448
171
gh_patches_debug_11556
rasdani/github-patches
git_diff
zestedesavoir__zds-site-6174
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Doublon pour les logs en couleur dans l'env de dev Sur mon environnement de dev, les logs en couleur apparaissent en double : dans leur version couleur et puis dans leur version standard. ![image](https://user-images.githubusercontent.com/6664636/132962287-3983d73d-dae7-4f95-b7c1-ed86c63c51ea.png) OS : Ubuntu Budgie 20.04 </issue> <code> [start of zds/settings/dev.py] 1 from colorlog import ColoredFormatter 2 3 from .abstract_base import * 4 5 DEBUG = True 6 7 # NOTE: Can be removed once Django 3 is used 8 ALLOWED_HOSTS = [".localhost", "127.0.0.1", "[::1]"] 9 10 INSTALLED_APPS += ( 11 "debug_toolbar", 12 "django_extensions", 13 ) 14 15 MIDDLEWARE = ( 16 "debug_toolbar.middleware.DebugToolbarMiddleware", 17 "zds.middlewares.nocacheindevmiddleware.NoCacheInDevMiddleware", 18 ) + MIDDLEWARE 19 20 LOGGING = { 21 "version": 1, 22 "disable_existing_loggers": False, 23 "formatters": { 24 "verbose": { 25 "()": ColoredFormatter, 26 "format": "%(log_color)s %(levelname)s %(reset)s %(bold_black)s%(name)s%(reset)s %(message)s", 27 "log_colors": { 28 "DEBUG": "fg_white,bg_black", 29 "INFO": "fg_black,bg_bold_white", 30 "WARNING": "fg_black,bg_bold_yellow", 31 "ERROR": "fg_bold_white,bg_bold_red", 32 "CRITICAL": "fg_bold_white,bg_bold_red", 33 }, 34 }, 35 "django.server": { 36 "()": ColoredFormatter, 37 "format": "%(log_color)s%(message)s", 38 "log_colors": { 39 "INFO": "bold_black", 40 "WARNING": "bold_yellow", 41 "ERROR": "bold_red", 42 "CRITICAL": "bold_red", 43 }, 44 }, 45 }, 46 "handlers": { 47 "console": { 48 "level": "DEBUG", 49 "class": "logging.StreamHandler", 50 "formatter": "verbose", 51 }, 52 "django.server": { 53 "level": "DEBUG", 54 "class": "logging.StreamHandler", 55 "formatter": "django.server", 56 }, 57 }, 58 "loggers": { 59 "django": { 60 "level": "INFO", 61 "handlers": ["console"], 62 }, 63 "django.server": { 64 "level": "INFO", 65 "handlers": ["django.server"], 66 "propagate": False, 67 }, 68 "zds": { 69 "level": "INFO", 70 "handlers": ["console"], 71 }, 72 }, 73 } 74 75 ZDS_APP["site"]["url"] = "http://127.0.0.1:8000" 76 ZDS_APP["site"]["dns"] = "127.0.0.1:8000" 77 78 ZDS_APP["very_top_banner"] = { 79 "background_color": "#666", 80 "border_color": "#353535", 81 "color": "white", 82 "message": "Version locale", 83 "slug": "version-locale", 84 } 85 [end of zds/settings/dev.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/settings/dev.py b/zds/settings/dev.py --- a/zds/settings/dev.py +++ b/zds/settings/dev.py @@ -55,10 +55,12 @@ "formatter": "django.server", }, }, + "root": { + "handlers": ["console"], + }, "loggers": { "django": { "level": "INFO", - "handlers": ["console"], }, "django.server": { "level": "INFO", @@ -67,7 +69,6 @@ }, "zds": { "level": "INFO", - "handlers": ["console"], }, }, }
{"golden_diff": "diff --git a/zds/settings/dev.py b/zds/settings/dev.py\n--- a/zds/settings/dev.py\n+++ b/zds/settings/dev.py\n@@ -55,10 +55,12 @@\n \"formatter\": \"django.server\",\n },\n },\n+ \"root\": {\n+ \"handlers\": [\"console\"],\n+ },\n \"loggers\": {\n \"django\": {\n \"level\": \"INFO\",\n- \"handlers\": [\"console\"],\n },\n \"django.server\": {\n \"level\": \"INFO\",\n@@ -67,7 +69,6 @@\n },\n \"zds\": {\n \"level\": \"INFO\",\n- \"handlers\": [\"console\"],\n },\n },\n }\n", "issue": "Doublon pour les logs en couleur dans l'env de dev\nSur mon environnement de dev, les logs en couleur apparaissent en double : dans leur version couleur et puis dans leur version standard.\r\n\r\n![image](https://user-images.githubusercontent.com/6664636/132962287-3983d73d-dae7-4f95-b7c1-ed86c63c51ea.png)\r\n\r\nOS : Ubuntu Budgie 20.04\n", "before_files": [{"content": "from colorlog import ColoredFormatter\n\nfrom .abstract_base import *\n\nDEBUG = True\n\n# NOTE: Can be removed once Django 3 is used\nALLOWED_HOSTS = [\".localhost\", \"127.0.0.1\", \"[::1]\"]\n\nINSTALLED_APPS += (\n \"debug_toolbar\",\n \"django_extensions\",\n)\n\nMIDDLEWARE = (\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"zds.middlewares.nocacheindevmiddleware.NoCacheInDevMiddleware\",\n) + MIDDLEWARE\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"()\": ColoredFormatter,\n \"format\": \"%(log_color)s %(levelname)s %(reset)s %(bold_black)s%(name)s%(reset)s %(message)s\",\n \"log_colors\": {\n \"DEBUG\": \"fg_white,bg_black\",\n \"INFO\": \"fg_black,bg_bold_white\",\n \"WARNING\": \"fg_black,bg_bold_yellow\",\n \"ERROR\": \"fg_bold_white,bg_bold_red\",\n \"CRITICAL\": \"fg_bold_white,bg_bold_red\",\n },\n },\n \"django.server\": {\n \"()\": ColoredFormatter,\n \"format\": \"%(log_color)s%(message)s\",\n \"log_colors\": {\n \"INFO\": \"bold_black\",\n \"WARNING\": \"bold_yellow\",\n \"ERROR\": \"bold_red\",\n \"CRITICAL\": \"bold_red\",\n },\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"django.server\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"django.server\",\n },\n },\n \"loggers\": {\n \"django\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n },\n \"django.server\": {\n \"level\": \"INFO\",\n \"handlers\": [\"django.server\"],\n \"propagate\": False,\n },\n \"zds\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n },\n },\n}\n\nZDS_APP[\"site\"][\"url\"] = \"http://127.0.0.1:8000\"\nZDS_APP[\"site\"][\"dns\"] = \"127.0.0.1:8000\"\n\nZDS_APP[\"very_top_banner\"] = {\n \"background_color\": \"#666\",\n \"border_color\": \"#353535\",\n \"color\": \"white\",\n \"message\": \"Version locale\",\n \"slug\": \"version-locale\",\n}\n", "path": "zds/settings/dev.py"}]}
1,400
157
gh_patches_debug_242
rasdani/github-patches
git_diff
sanic-org__sanic-1559
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 2 failed tests when tox is not used (missing fixture "benchmark") `pytest-benchmark` is not present in `tests_require`, so there are 2 failed tests in `tests/benchmark/test_route_resolution_benchmark.py` when tox is not used. This requirement is present in `tox.ini` so tox and Travis CI are working fine. (I don't know what's a better fix — disable the benchmark tests or add `pytest-benchmark` to `tests_require`, so I didn't create a PR) </issue> <code> [start of setup.py] 1 """ 2 Sanic 3 """ 4 import codecs 5 import os 6 import re 7 import sys 8 from distutils.util import strtobool 9 10 from setuptools import setup 11 from setuptools.command.test import test as TestCommand 12 13 14 class PyTest(TestCommand): 15 """ 16 Provide a Test runner to be used from setup.py to run unit tests 17 """ 18 19 user_options = [("pytest-args=", "a", "Arguments to pass to pytest")] 20 21 def initialize_options(self): 22 TestCommand.initialize_options(self) 23 self.pytest_args = "" 24 25 def run_tests(self): 26 import shlex 27 import pytest 28 29 errno = pytest.main(shlex.split(self.pytest_args)) 30 sys.exit(errno) 31 32 33 def open_local(paths, mode="r", encoding="utf8"): 34 path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths) 35 36 return codecs.open(path, mode, encoding) 37 38 39 with open_local(["sanic", "__init__.py"], encoding="latin1") as fp: 40 try: 41 version = re.findall( 42 r"^__version__ = \"([^']+)\"\r?$", fp.read(), re.M 43 )[0] 44 except IndexError: 45 raise RuntimeError("Unable to determine version.") 46 47 with open_local(["README.rst"]) as rm: 48 long_description = rm.read() 49 50 setup_kwargs = { 51 "name": "sanic", 52 "version": version, 53 "url": "http://github.com/channelcat/sanic/", 54 "license": "MIT", 55 "author": "Channel Cat", 56 "author_email": "[email protected]", 57 "description": ( 58 "A microframework based on uvloop, httptools, and learnings of flask" 59 ), 60 "long_description": long_description, 61 "packages": ["sanic"], 62 "platforms": "any", 63 "classifiers": [ 64 "Development Status :: 4 - Beta", 65 "Environment :: Web Environment", 66 "License :: OSI Approved :: MIT License", 67 "Programming Language :: Python :: 3.5", 68 "Programming Language :: Python :: 3.6", 69 "Programming Language :: Python :: 3.7", 70 ], 71 } 72 73 env_dependency = ( 74 '; sys_platform != "win32" ' 'and implementation_name == "cpython"' 75 ) 76 ujson = "ujson>=1.35" + env_dependency 77 uvloop = "uvloop>=0.5.3" + env_dependency 78 79 requirements = [ 80 "httptools>=0.0.10", 81 uvloop, 82 ujson, 83 "aiofiles>=0.3.0", 84 "websockets>=6.0,<7.0", 85 "multidict>=4.0,<5.0", 86 ] 87 88 tests_require = [ 89 "pytest==4.1.0", 90 "multidict>=4.0,<5.0", 91 "gunicorn", 92 "pytest-cov", 93 "aiohttp>=2.3.0,<=3.2.1", 94 "beautifulsoup4", 95 uvloop, 96 ujson, 97 "pytest-sanic", 98 "pytest-sugar", 99 ] 100 101 if strtobool(os.environ.get("SANIC_NO_UJSON", "no")): 102 print("Installing without uJSON") 103 requirements.remove(ujson) 104 tests_require.remove(ujson) 105 106 # 'nt' means windows OS 107 if strtobool(os.environ.get("SANIC_NO_UVLOOP", "no")): 108 print("Installing without uvLoop") 109 requirements.remove(uvloop) 110 tests_require.remove(uvloop) 111 112 extras_require = { 113 "test": tests_require, 114 "dev": tests_require + ["aiofiles", "tox", "black", "flake8"], 115 "docs": [ 116 "sphinx", 117 "sphinx_rtd_theme", 118 "recommonmark", 119 "sphinxcontrib-asyncio", 120 "docutils", 121 "pygments" 122 ], 123 } 124 125 setup_kwargs["install_requires"] = requirements 126 setup_kwargs["tests_require"] = tests_require 127 setup_kwargs["extras_require"] = extras_require 128 setup_kwargs["cmdclass"] = {"test": PyTest} 129 setup(**setup_kwargs) 130 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -96,6 +96,7 @@ ujson, "pytest-sanic", "pytest-sugar", + "pytest-benchmark", ] if strtobool(os.environ.get("SANIC_NO_UJSON", "no")):
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,6 +96,7 @@\n ujson,\n \"pytest-sanic\",\n \"pytest-sugar\",\n+ \"pytest-benchmark\",\n ]\n \n if strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n", "issue": "2 failed tests when tox is not used (missing fixture \"benchmark\")\n`pytest-benchmark` is not present in `tests_require`, so there are 2 failed tests in `tests/benchmark/test_route_resolution_benchmark.py` when tox is not used.\r\n\r\nThis requirement is present in `tox.ini` so tox and Travis CI are working fine.\r\n\r\n(I don't know what's a better fix \u2014 disable the benchmark tests or add `pytest-benchmark` to `tests_require`, so I didn't create a PR)\n", "before_files": [{"content": "\"\"\"\nSanic\n\"\"\"\nimport codecs\nimport os\nimport re\nimport sys\nfrom distutils.util import strtobool\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n \"\"\"\n Provide a Test runner to be used from setup.py to run unit tests\n \"\"\"\n\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to pytest\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def run_tests(self):\n import shlex\n import pytest\n\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\ndef open_local(paths, mode=\"r\", encoding=\"utf8\"):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)\n\n return codecs.open(path, mode, encoding)\n\n\nwith open_local([\"sanic\", \"__init__.py\"], encoding=\"latin1\") as fp:\n try:\n version = re.findall(\n r\"^__version__ = \\\"([^']+)\\\"\\r?$\", fp.read(), re.M\n )[0]\n except IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\nwith open_local([\"README.rst\"]) as rm:\n long_description = rm.read()\n\nsetup_kwargs = {\n \"name\": \"sanic\",\n \"version\": version,\n \"url\": \"http://github.com/channelcat/sanic/\",\n \"license\": \"MIT\",\n \"author\": \"Channel Cat\",\n \"author_email\": \"[email protected]\",\n \"description\": (\n \"A microframework based on uvloop, httptools, and learnings of flask\"\n ),\n \"long_description\": long_description,\n \"packages\": [\"sanic\"],\n \"platforms\": \"any\",\n \"classifiers\": [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n}\n\nenv_dependency = (\n '; sys_platform != \"win32\" ' 'and implementation_name == \"cpython\"'\n)\nujson = \"ujson>=1.35\" + env_dependency\nuvloop = \"uvloop>=0.5.3\" + env_dependency\n\nrequirements = [\n \"httptools>=0.0.10\",\n uvloop,\n ujson,\n \"aiofiles>=0.3.0\",\n \"websockets>=6.0,<7.0\",\n \"multidict>=4.0,<5.0\",\n]\n\ntests_require = [\n \"pytest==4.1.0\",\n \"multidict>=4.0,<5.0\",\n \"gunicorn\",\n \"pytest-cov\",\n \"aiohttp>=2.3.0,<=3.2.1\",\n \"beautifulsoup4\",\n uvloop,\n ujson,\n \"pytest-sanic\",\n \"pytest-sugar\",\n]\n\nif strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n print(\"Installing without uJSON\")\n requirements.remove(ujson)\n tests_require.remove(ujson)\n\n# 'nt' means windows OS\nif strtobool(os.environ.get(\"SANIC_NO_UVLOOP\", \"no\")):\n print(\"Installing without uvLoop\")\n requirements.remove(uvloop)\n tests_require.remove(uvloop)\n\nextras_require = {\n \"test\": tests_require,\n \"dev\": tests_require + [\"aiofiles\", \"tox\", \"black\", \"flake8\"],\n \"docs\": [\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"recommonmark\",\n \"sphinxcontrib-asyncio\",\n \"docutils\",\n \"pygments\"\n ],\n}\n\nsetup_kwargs[\"install_requires\"] = requirements\nsetup_kwargs[\"tests_require\"] = tests_require\nsetup_kwargs[\"extras_require\"] = extras_require\nsetup_kwargs[\"cmdclass\"] = {\"test\": PyTest}\nsetup(**setup_kwargs)\n", "path": "setup.py"}]}
1,809
76
gh_patches_debug_40285
rasdani/github-patches
git_diff
optuna__optuna-3115
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [RFC] Show progress bar based on `timeout` <!-- Please write a clear and concise description of the feature proposal. --> ## Motivation The current [`study.optimize`](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) shows the progress bar when we specify the number of trials. We can show the progress bar based on the `timeout` value as well. Note that before implementing this feature, we need to decide the priority of `n_trials` and `timeout`. When we specify both values as not `None` values, there are two ways to show the progress bar. Moreover, we need to discuss the case when both `None`. I suppose not showing the progress bar makes sense when both are `None`. ## Description <!-- Please write a detailed description of the new feature. --> - Show progress bar using `timeout` value. - Add test by following https://github.com/optuna/optuna/pull/2900 ## Alternatives (optional) <!-- Please write a clear and concise description of any alternative solutions or features you've considered. --> ## Additional context (optional) We might need to support the `n_jobs!=1` case depending on https://github.com/optuna/optuna/issues/2957. <!-- Please add any other context or screenshots about the feature request here. --> </issue> <code> [start of optuna/progress_bar.py] 1 import logging 2 from typing import Any 3 from typing import Optional 4 5 from tqdm.auto import tqdm 6 7 from optuna import logging as optuna_logging 8 from optuna._experimental import experimental 9 10 11 _tqdm_handler: Optional["_TqdmLoggingHandler"] = None 12 13 14 # Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02 15 class _TqdmLoggingHandler(logging.StreamHandler): 16 def emit(self, record: Any) -> None: 17 try: 18 msg = self.format(record) 19 tqdm.write(msg) 20 self.flush() 21 except (KeyboardInterrupt, SystemExit): 22 raise 23 except Exception: 24 self.handleError(record) 25 26 27 class _ProgressBar(object): 28 """Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`. 29 30 Args: 31 is_valid: 32 Whether to show progress bars in :func:`~optuna.study.Study.optimize`. 33 n_trials: 34 The number of trials. 35 timeout: 36 Stop study after the given number of second(s). 37 """ 38 39 def __init__( 40 self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None 41 ) -> None: 42 self._is_valid = is_valid 43 self._n_trials = n_trials 44 self._timeout = timeout 45 46 if self._is_valid: 47 self._init_valid() 48 49 # TODO(hvy): Remove initialization indirection via this method when the progress bar is no 50 # longer experimental. 51 @experimental("1.2.0", name="Progress bar") 52 def _init_valid(self) -> None: 53 self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None) 54 global _tqdm_handler 55 56 _tqdm_handler = _TqdmLoggingHandler() 57 _tqdm_handler.setLevel(logging.INFO) 58 _tqdm_handler.setFormatter(optuna_logging.create_default_formatter()) 59 optuna_logging.disable_default_handler() 60 optuna_logging._get_library_root_logger().addHandler(_tqdm_handler) 61 62 def update(self, elapsed_seconds: Optional[float]) -> None: 63 """Update the progress bars if ``is_valid`` is :obj:`True`. 64 65 Args: 66 elapsed_seconds: 67 The time past since :func:`~optuna.study.Study.optimize` started. 68 """ 69 if self._is_valid: 70 self._progress_bar.update(1) 71 if self._timeout is not None and elapsed_seconds is not None: 72 self._progress_bar.set_postfix_str( 73 "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout) 74 ) 75 76 def close(self) -> None: 77 """Close progress bars.""" 78 if self._is_valid: 79 self._progress_bar.close() 80 assert _tqdm_handler is not None 81 optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler) 82 optuna_logging.enable_default_handler() 83 [end of optuna/progress_bar.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/optuna/progress_bar.py b/optuna/progress_bar.py --- a/optuna/progress_bar.py +++ b/optuna/progress_bar.py @@ -39,9 +39,11 @@ def __init__( self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None ) -> None: - self._is_valid = is_valid + + self._is_valid = is_valid and (n_trials or timeout) is not None self._n_trials = n_trials self._timeout = timeout + self._last_elapsed_seconds = 0.0 if self._is_valid: self._init_valid() @@ -50,7 +52,21 @@ # longer experimental. @experimental("1.2.0", name="Progress bar") def _init_valid(self) -> None: - self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None) + + if self._n_trials is not None: + self._progress_bar = tqdm(total=self._n_trials) + + else: + fmt = "{percentage:3.0f}%|{bar}| {elapsed}/{desc}" + self._progress_bar = tqdm(total=self._timeout, bar_format=fmt) + + # Using description string instead postfix string + # to display formatted timeout, since postfix carries + # extra comma space auto-format. + # https://github.com/tqdm/tqdm/issues/712 + total = tqdm.format_interval(self._timeout) + self._progress_bar.set_description_str(total) + global _tqdm_handler _tqdm_handler = _TqdmLoggingHandler() @@ -59,22 +75,37 @@ optuna_logging.disable_default_handler() optuna_logging._get_library_root_logger().addHandler(_tqdm_handler) - def update(self, elapsed_seconds: Optional[float]) -> None: + def update(self, elapsed_seconds: float) -> None: """Update the progress bars if ``is_valid`` is :obj:`True`. Args: elapsed_seconds: The time past since :func:`~optuna.study.Study.optimize` started. """ + if self._is_valid: - self._progress_bar.update(1) - if self._timeout is not None and elapsed_seconds is not None: - self._progress_bar.set_postfix_str( - "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout) - ) + if self._n_trials is not None: + self._progress_bar.update(1) + if self._timeout is not None: + self._progress_bar.set_postfix_str( + "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout) + ) + + elif self._timeout is not None: + time_diff = elapsed_seconds - self._last_elapsed_seconds + if elapsed_seconds > self._timeout: + # Clip elapsed time to avoid tqdm warnings. + time_diff -= elapsed_seconds - self._timeout + + self._progress_bar.update(time_diff) + self._last_elapsed_seconds = elapsed_seconds + + else: + assert False def close(self) -> None: """Close progress bars.""" + if self._is_valid: self._progress_bar.close() assert _tqdm_handler is not None
{"golden_diff": "diff --git a/optuna/progress_bar.py b/optuna/progress_bar.py\n--- a/optuna/progress_bar.py\n+++ b/optuna/progress_bar.py\n@@ -39,9 +39,11 @@\n def __init__(\n self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None\n ) -> None:\n- self._is_valid = is_valid\n+\n+ self._is_valid = is_valid and (n_trials or timeout) is not None\n self._n_trials = n_trials\n self._timeout = timeout\n+ self._last_elapsed_seconds = 0.0\n \n if self._is_valid:\n self._init_valid()\n@@ -50,7 +52,21 @@\n # longer experimental.\n @experimental(\"1.2.0\", name=\"Progress bar\")\n def _init_valid(self) -> None:\n- self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None)\n+\n+ if self._n_trials is not None:\n+ self._progress_bar = tqdm(total=self._n_trials)\n+\n+ else:\n+ fmt = \"{percentage:3.0f}%|{bar}| {elapsed}/{desc}\"\n+ self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)\n+\n+ # Using description string instead postfix string\n+ # to display formatted timeout, since postfix carries\n+ # extra comma space auto-format.\n+ # https://github.com/tqdm/tqdm/issues/712\n+ total = tqdm.format_interval(self._timeout)\n+ self._progress_bar.set_description_str(total)\n+\n global _tqdm_handler\n \n _tqdm_handler = _TqdmLoggingHandler()\n@@ -59,22 +75,37 @@\n optuna_logging.disable_default_handler()\n optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n \n- def update(self, elapsed_seconds: Optional[float]) -> None:\n+ def update(self, elapsed_seconds: float) -> None:\n \"\"\"Update the progress bars if ``is_valid`` is :obj:`True`.\n \n Args:\n elapsed_seconds:\n The time past since :func:`~optuna.study.Study.optimize` started.\n \"\"\"\n+\n if self._is_valid:\n- self._progress_bar.update(1)\n- if self._timeout is not None and elapsed_seconds is not None:\n- self._progress_bar.set_postfix_str(\n- \"{:.02f}/{} seconds\".format(elapsed_seconds, self._timeout)\n- )\n+ if self._n_trials is not None:\n+ self._progress_bar.update(1)\n+ if self._timeout is not None:\n+ self._progress_bar.set_postfix_str(\n+ \"{:.02f}/{} seconds\".format(elapsed_seconds, self._timeout)\n+ )\n+\n+ elif self._timeout is not None:\n+ time_diff = elapsed_seconds - self._last_elapsed_seconds\n+ if elapsed_seconds > self._timeout:\n+ # Clip elapsed time to avoid tqdm warnings.\n+ time_diff -= elapsed_seconds - self._timeout\n+\n+ self._progress_bar.update(time_diff)\n+ self._last_elapsed_seconds = elapsed_seconds\n+\n+ else:\n+ assert False\n \n def close(self) -> None:\n \"\"\"Close progress bars.\"\"\"\n+\n if self._is_valid:\n self._progress_bar.close()\n assert _tqdm_handler is not None\n", "issue": "[RFC] Show progress bar based on `timeout`\n<!-- Please write a clear and concise description of the feature proposal. -->\r\n\r\n## Motivation\r\n\r\nThe current [`study.optimize`](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) shows the progress bar when we specify the number of trials. We can show the progress bar based on the `timeout` value as well.\r\n\r\nNote that before implementing this feature, we need to decide the priority of `n_trials` and `timeout`. When we specify both values as not `None` values, there are two ways to show the progress bar. \r\n\r\nMoreover, we need to discuss the case when both `None`. I suppose not showing the progress bar makes sense when both are `None`.\r\n\r\n## Description\r\n\r\n<!-- Please write a detailed description of the new feature. -->\r\n\r\n\r\n- Show progress bar using `timeout` value.\r\n- Add test by following https://github.com/optuna/optuna/pull/2900\r\n\r\n## Alternatives (optional)\r\n\r\n\r\n<!-- Please write a clear and concise description of any alternative solutions or features you've considered. -->\r\n\r\n## Additional context (optional)\r\n\r\nWe might need to support the `n_jobs!=1` case depending on https://github.com/optuna/optuna/issues/2957.\r\n\r\n<!-- Please add any other context or screenshots about the feature request here. -->\r\n\n", "before_files": [{"content": "import logging\nfrom typing import Any\nfrom typing import Optional\n\nfrom tqdm.auto import tqdm\n\nfrom optuna import logging as optuna_logging\nfrom optuna._experimental import experimental\n\n\n_tqdm_handler: Optional[\"_TqdmLoggingHandler\"] = None\n\n\n# Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02\nclass _TqdmLoggingHandler(logging.StreamHandler):\n def emit(self, record: Any) -> None:\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)\n\n\nclass _ProgressBar(object):\n \"\"\"Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`.\n\n Args:\n is_valid:\n Whether to show progress bars in :func:`~optuna.study.Study.optimize`.\n n_trials:\n The number of trials.\n timeout:\n Stop study after the given number of second(s).\n \"\"\"\n\n def __init__(\n self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None\n ) -> None:\n self._is_valid = is_valid\n self._n_trials = n_trials\n self._timeout = timeout\n\n if self._is_valid:\n self._init_valid()\n\n # TODO(hvy): Remove initialization indirection via this method when the progress bar is no\n # longer experimental.\n @experimental(\"1.2.0\", name=\"Progress bar\")\n def _init_valid(self) -> None:\n self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None)\n global _tqdm_handler\n\n _tqdm_handler = _TqdmLoggingHandler()\n _tqdm_handler.setLevel(logging.INFO)\n _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())\n optuna_logging.disable_default_handler()\n optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n\n def update(self, elapsed_seconds: Optional[float]) -> None:\n \"\"\"Update the progress bars if ``is_valid`` is :obj:`True`.\n\n Args:\n elapsed_seconds:\n The time past since :func:`~optuna.study.Study.optimize` started.\n \"\"\"\n if self._is_valid:\n self._progress_bar.update(1)\n if self._timeout is not None and elapsed_seconds is not None:\n self._progress_bar.set_postfix_str(\n \"{:.02f}/{} seconds\".format(elapsed_seconds, self._timeout)\n )\n\n def close(self) -> None:\n \"\"\"Close progress bars.\"\"\"\n if self._is_valid:\n self._progress_bar.close()\n assert _tqdm_handler is not None\n optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler)\n optuna_logging.enable_default_handler()\n", "path": "optuna/progress_bar.py"}]}
1,641
769
gh_patches_debug_27844
rasdani/github-patches
git_diff
Zeroto521__my-data-toolkit-713
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ENH: New geoaccessor for GeoSeries to return tuple of coordinates `(x, y)` <!-- Thanks for contributing a pull request! Please follow these standard acronyms to start the commit message: - ENH: enhancement - BUG: bug fix - DOC: documentation - TYP: type annotations - TST: addition or modification of tests - MAINT: maintenance commit (refactoring, typos, etc.) - BLD: change related to building - REL: related to releasing - API: an (incompatible) API change - DEP: deprecate something, or remove a deprecated object - DEV: development tool or utility - REV: revert an earlier commit - PERF: performance improvement - BOT: always commit via a bot - CI: related to CI or CD - CLN: Code cleanup --> - [ ] closes #xxxx - [x] whatsnew entry as title ```python >>> import dtoolkit.geoaccessor >>> import geopandas as gpd >>> from shapely.geometry import Point >>> s = geopandas.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)]) >>> s.xy 0 (1.0, 1.0) 1 (2.0, 2.0) 2 (3.0, 3.0) dtype: object ``` </issue> <code> [start of dtoolkit/geoaccessor/geoseries/xy.py] 1 import geopandas as gpd 2 import pandas as pd 3 4 from dtoolkit.geoaccessor.register import register_geoseries_method 5 6 7 @register_geoseries_method 8 def xy(s: gpd.GeoSeries, /) -> pd.Series: 9 """ 10 Return the x and y location of Point geometries in a GeoSeries. 11 12 Returns 13 ------- 14 Series 15 tuple of x and y coordinates. 16 17 See Also 18 -------- 19 geopandas.GeoSeries.x 20 geopandas.GeoSeries.y 21 22 Examples 23 -------- 24 >>> import dtoolkit.geoaccessor 25 >>> import geopandas as gpd 26 >>> from shapely.geometry import Point 27 >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)]) 28 >>> s 29 0 POINT (1.00000 1.00000) 30 1 POINT (2.00000 2.00000) 31 2 POINT (3.00000 3.00000) 32 dtype: geometry 33 >>> s.xy() 34 0 (1.0, 1.0) 35 1 (2.0, 2.0) 36 2 (3.0, 3.0) 37 dtype: object 38 """ 39 40 return pd.concat((s.x, s.y), axis=1).apply(tuple, axis=1) 41 [end of dtoolkit/geoaccessor/geoseries/xy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dtoolkit/geoaccessor/geoseries/xy.py b/dtoolkit/geoaccessor/geoseries/xy.py --- a/dtoolkit/geoaccessor/geoseries/xy.py +++ b/dtoolkit/geoaccessor/geoseries/xy.py @@ -5,14 +5,19 @@ @register_geoseries_method -def xy(s: gpd.GeoSeries, /) -> pd.Series: +def xy(s: gpd.GeoSeries, /, reverse: bool = False) -> pd.Series: """ Return the x and y location of Point geometries in a GeoSeries. + Parameters + ---------- + reverse : bool, default False + If True, return (y, x) instead. + Returns ------- Series - tuple of x and y coordinates. + tuple of coordinate. See Also -------- @@ -24,17 +29,26 @@ >>> import dtoolkit.geoaccessor >>> import geopandas as gpd >>> from shapely.geometry import Point - >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)]) + >>> s = gpd.GeoSeries([Point(0, 1), Point(0, 2), Point(0, 3)]) >>> s - 0 POINT (1.00000 1.00000) - 1 POINT (2.00000 2.00000) - 2 POINT (3.00000 3.00000) + 0 POINT (0.00000 1.00000) + 1 POINT (0.00000 2.00000) + 2 POINT (0.00000 3.00000) dtype: geometry >>> s.xy() - 0 (1.0, 1.0) - 1 (2.0, 2.0) - 2 (3.0, 3.0) + 0 (0.0, 1.0) + 1 (0.0, 2.0) + 2 (0.0, 3.0) + dtype: object + + Set ``reverse=True`` to return (y, x). + + >>> s.xy(True) + 0 (1.0, 0.0) + 1 (2.0, 0.0) + 2 (3.0, 0.0) dtype: object """ - return pd.concat((s.x, s.y), axis=1).apply(tuple, axis=1) + coordinates = (s.y, s.x) if reverse else (s.x, s.y) + return pd.concat(coordinates, axis=1).apply(tuple, axis=1)
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/geoseries/xy.py b/dtoolkit/geoaccessor/geoseries/xy.py\n--- a/dtoolkit/geoaccessor/geoseries/xy.py\n+++ b/dtoolkit/geoaccessor/geoseries/xy.py\n@@ -5,14 +5,19 @@\n \n \n @register_geoseries_method\n-def xy(s: gpd.GeoSeries, /) -> pd.Series:\n+def xy(s: gpd.GeoSeries, /, reverse: bool = False) -> pd.Series:\n \"\"\"\n Return the x and y location of Point geometries in a GeoSeries.\n \n+ Parameters\n+ ----------\n+ reverse : bool, default False\n+ If True, return (y, x) instead.\n+\n Returns\n -------\n Series\n- tuple of x and y coordinates.\n+ tuple of coordinate.\n \n See Also\n --------\n@@ -24,17 +29,26 @@\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely.geometry import Point\n- >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])\n+ >>> s = gpd.GeoSeries([Point(0, 1), Point(0, 2), Point(0, 3)])\n >>> s\n- 0 POINT (1.00000 1.00000)\n- 1 POINT (2.00000 2.00000)\n- 2 POINT (3.00000 3.00000)\n+ 0 POINT (0.00000 1.00000)\n+ 1 POINT (0.00000 2.00000)\n+ 2 POINT (0.00000 3.00000)\n dtype: geometry\n >>> s.xy()\n- 0 (1.0, 1.0)\n- 1 (2.0, 2.0)\n- 2 (3.0, 3.0)\n+ 0 (0.0, 1.0)\n+ 1 (0.0, 2.0)\n+ 2 (0.0, 3.0)\n+ dtype: object\n+\n+ Set ``reverse=True`` to return (y, x).\n+\n+ >>> s.xy(True)\n+ 0 (1.0, 0.0)\n+ 1 (2.0, 0.0)\n+ 2 (3.0, 0.0)\n dtype: object\n \"\"\"\n \n- return pd.concat((s.x, s.y), axis=1).apply(tuple, axis=1)\n+ coordinates = (s.y, s.x) if reverse else (s.x, s.y)\n+ return pd.concat(coordinates, axis=1).apply(tuple, axis=1)\n", "issue": "ENH: New geoaccessor for GeoSeries to return tuple of coordinates `(x, y)`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\nas title\r\n\r\n```python\r\n >>> import dtoolkit.geoaccessor\r\n >>> import geopandas as gpd\r\n >>> from shapely.geometry import Point\r\n >>> s = geopandas.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])\r\n >>> s.xy\r\n 0 (1.0, 1.0)\r\n 1 (2.0, 2.0)\r\n 2 (3.0, 3.0)\r\n dtype: object\r\n```\n", "before_files": [{"content": "import geopandas as gpd\nimport pandas as pd\n\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef xy(s: gpd.GeoSeries, /) -> pd.Series:\n \"\"\"\n Return the x and y location of Point geometries in a GeoSeries.\n\n Returns\n -------\n Series\n tuple of x and y coordinates.\n\n See Also\n --------\n geopandas.GeoSeries.x\n geopandas.GeoSeries.y\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely.geometry import Point\n >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])\n >>> s\n 0 POINT (1.00000 1.00000)\n 1 POINT (2.00000 2.00000)\n 2 POINT (3.00000 3.00000)\n dtype: geometry\n >>> s.xy()\n 0 (1.0, 1.0)\n 1 (2.0, 2.0)\n 2 (3.0, 3.0)\n dtype: object\n \"\"\"\n\n return pd.concat((s.x, s.y), axis=1).apply(tuple, axis=1)\n", "path": "dtoolkit/geoaccessor/geoseries/xy.py"}]}
1,280
697
gh_patches_debug_18990
rasdani/github-patches
git_diff
qutebrowser__qutebrowser-5916
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Port completion.models.listcategory to QRegularExpression In `qutebrowser/completion/models/listcategory.py`, we use `QRegExp`, which is going to be removed in Qt 6: ```python rx = QRegExp(val, Qt.CaseInsensitive) self.setFilterRegExp(rx) ``` We should instead [port](https://doc-snapshots.qt.io/qt6-dev/qtcore-changes-qt6.html#regular-expression-classes) this to use [QRegularExpression](https://doc.qt.io/qt-5/qregularexpression.html) (or, perhaps, Python's `re`?) instead. ~~(Also, we should probably call `qtutils.ensure_valid(rx)`)~~ (done in `master` already) cc @rcorre </issue> <code> [start of qutebrowser/completion/models/listcategory.py] 1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 2 3 # Copyright 2017-2020 Ryan Roden-Corrent (rcorre) <[email protected]> 4 # 5 # This file is part of qutebrowser. 6 # 7 # qutebrowser is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # qutebrowser is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 19 20 """Completion category that uses a list of tuples as a data source.""" 21 22 import re 23 from typing import Iterable, Tuple 24 25 from PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp 26 from PyQt5.QtGui import QStandardItem, QStandardItemModel 27 from PyQt5.QtWidgets import QWidget 28 29 from qutebrowser.completion.models import util 30 from qutebrowser.utils import qtutils, log 31 32 33 class ListCategory(QSortFilterProxyModel): 34 35 """Expose a list of items as a category for the CompletionModel.""" 36 37 def __init__(self, 38 name: str, 39 items: Iterable[Tuple[str, ...]], 40 sort: bool = True, 41 delete_func: util.DeleteFuncType = None, 42 parent: QWidget = None): 43 super().__init__(parent) 44 self.name = name 45 self.srcmodel = QStandardItemModel(parent=self) 46 self._pattern = '' 47 # ListCategory filters all columns 48 self.columns_to_filter = [0, 1, 2] 49 self.setFilterKeyColumn(-1) 50 for item in items: 51 self.srcmodel.appendRow([QStandardItem(x) for x in item]) 52 self.setSourceModel(self.srcmodel) 53 self.delete_func = delete_func 54 self._sort = sort 55 56 def set_pattern(self, val): 57 """Setter for pattern. 58 59 Args: 60 val: The value to set. 61 """ 62 self._pattern = val 63 val = re.sub(r' +', r' ', val) # See #1919 64 val = re.escape(val) 65 val = val.replace(r'\ ', '.*') 66 rx = QRegExp(val, Qt.CaseInsensitive) 67 qtutils.ensure_valid(rx) 68 self.setFilterRegExp(rx) 69 self.invalidate() 70 sortcol = 0 71 self.sort(sortcol) 72 73 def lessThan(self, lindex, rindex): 74 """Custom sorting implementation. 75 76 Prefers all items which start with self._pattern. Other than that, uses 77 normal Python string sorting. 78 79 Args: 80 lindex: The QModelIndex of the left item (*left* < right) 81 rindex: The QModelIndex of the right item (left < *right*) 82 83 Return: 84 True if left < right, else False 85 """ 86 qtutils.ensure_valid(lindex) 87 qtutils.ensure_valid(rindex) 88 89 left = self.srcmodel.data(lindex) 90 right = self.srcmodel.data(rindex) 91 92 if left is None or right is None: # pragma: no cover 93 log.completion.warning("Got unexpected None value, " 94 "left={!r} right={!r} " 95 "lindex={!r} rindex={!r}" 96 .format(left, right, lindex, rindex)) 97 return False 98 99 leftstart = left.startswith(self._pattern) 100 rightstart = right.startswith(self._pattern) 101 102 if leftstart and not rightstart: 103 return True 104 elif rightstart and not leftstart: 105 return False 106 elif self._sort: 107 return left < right 108 else: 109 return False 110 [end of qutebrowser/completion/models/listcategory.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qutebrowser/completion/models/listcategory.py b/qutebrowser/completion/models/listcategory.py --- a/qutebrowser/completion/models/listcategory.py +++ b/qutebrowser/completion/models/listcategory.py @@ -22,7 +22,7 @@ import re from typing import Iterable, Tuple -from PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp +from PyQt5.QtCore import QSortFilterProxyModel, QRegularExpression from PyQt5.QtGui import QStandardItem, QStandardItemModel from PyQt5.QtWidgets import QWidget @@ -63,9 +63,9 @@ val = re.sub(r' +', r' ', val) # See #1919 val = re.escape(val) val = val.replace(r'\ ', '.*') - rx = QRegExp(val, Qt.CaseInsensitive) + rx = QRegularExpression(val, QRegularExpression.CaseInsensitiveOption) qtutils.ensure_valid(rx) - self.setFilterRegExp(rx) + self.setFilterRegularExpression(rx) self.invalidate() sortcol = 0 self.sort(sortcol)
{"golden_diff": "diff --git a/qutebrowser/completion/models/listcategory.py b/qutebrowser/completion/models/listcategory.py\n--- a/qutebrowser/completion/models/listcategory.py\n+++ b/qutebrowser/completion/models/listcategory.py\n@@ -22,7 +22,7 @@\n import re\n from typing import Iterable, Tuple\n \n-from PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp\n+from PyQt5.QtCore import QSortFilterProxyModel, QRegularExpression\n from PyQt5.QtGui import QStandardItem, QStandardItemModel\n from PyQt5.QtWidgets import QWidget\n \n@@ -63,9 +63,9 @@\n val = re.sub(r' +', r' ', val) # See #1919\n val = re.escape(val)\n val = val.replace(r'\\ ', '.*')\n- rx = QRegExp(val, Qt.CaseInsensitive)\n+ rx = QRegularExpression(val, QRegularExpression.CaseInsensitiveOption)\n qtutils.ensure_valid(rx)\n- self.setFilterRegExp(rx)\n+ self.setFilterRegularExpression(rx)\n self.invalidate()\n sortcol = 0\n self.sort(sortcol)\n", "issue": "Port completion.models.listcategory to QRegularExpression\nIn `qutebrowser/completion/models/listcategory.py`, we use `QRegExp`, which is going to be removed in Qt 6:\r\n\r\n```python\r\n rx = QRegExp(val, Qt.CaseInsensitive)\r\n self.setFilterRegExp(rx)\r\n```\r\n\r\nWe should instead [port](https://doc-snapshots.qt.io/qt6-dev/qtcore-changes-qt6.html#regular-expression-classes) this to use [QRegularExpression](https://doc.qt.io/qt-5/qregularexpression.html) (or, perhaps, Python's `re`?) instead.\r\n\r\n~~(Also, we should probably call `qtutils.ensure_valid(rx)`)~~ (done in `master` already)\r\n\r\ncc @rcorre \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2020 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completion category that uses a list of tuples as a data source.\"\"\"\n\nimport re\nfrom typing import Iterable, Tuple\n\nfrom PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp\nfrom PyQt5.QtGui import QStandardItem, QStandardItemModel\nfrom PyQt5.QtWidgets import QWidget\n\nfrom qutebrowser.completion.models import util\nfrom qutebrowser.utils import qtutils, log\n\n\nclass ListCategory(QSortFilterProxyModel):\n\n \"\"\"Expose a list of items as a category for the CompletionModel.\"\"\"\n\n def __init__(self,\n name: str,\n items: Iterable[Tuple[str, ...]],\n sort: bool = True,\n delete_func: util.DeleteFuncType = None,\n parent: QWidget = None):\n super().__init__(parent)\n self.name = name\n self.srcmodel = QStandardItemModel(parent=self)\n self._pattern = ''\n # ListCategory filters all columns\n self.columns_to_filter = [0, 1, 2]\n self.setFilterKeyColumn(-1)\n for item in items:\n self.srcmodel.appendRow([QStandardItem(x) for x in item])\n self.setSourceModel(self.srcmodel)\n self.delete_func = delete_func\n self._sort = sort\n\n def set_pattern(self, val):\n \"\"\"Setter for pattern.\n\n Args:\n val: The value to set.\n \"\"\"\n self._pattern = val\n val = re.sub(r' +', r' ', val) # See #1919\n val = re.escape(val)\n val = val.replace(r'\\ ', '.*')\n rx = QRegExp(val, Qt.CaseInsensitive)\n qtutils.ensure_valid(rx)\n self.setFilterRegExp(rx)\n self.invalidate()\n sortcol = 0\n self.sort(sortcol)\n\n def lessThan(self, lindex, rindex):\n \"\"\"Custom sorting implementation.\n\n Prefers all items which start with self._pattern. Other than that, uses\n normal Python string sorting.\n\n Args:\n lindex: The QModelIndex of the left item (*left* < right)\n rindex: The QModelIndex of the right item (left < *right*)\n\n Return:\n True if left < right, else False\n \"\"\"\n qtutils.ensure_valid(lindex)\n qtutils.ensure_valid(rindex)\n\n left = self.srcmodel.data(lindex)\n right = self.srcmodel.data(rindex)\n\n if left is None or right is None: # pragma: no cover\n log.completion.warning(\"Got unexpected None value, \"\n \"left={!r} right={!r} \"\n \"lindex={!r} rindex={!r}\"\n .format(left, right, lindex, rindex))\n return False\n\n leftstart = left.startswith(self._pattern)\n rightstart = right.startswith(self._pattern)\n\n if leftstart and not rightstart:\n return True\n elif rightstart and not leftstart:\n return False\n elif self._sort:\n return left < right\n else:\n return False\n", "path": "qutebrowser/completion/models/listcategory.py"}]}
1,788
246
gh_patches_debug_36242
rasdani/github-patches
git_diff
pytorch__ignite-1756
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve `ignite.contirb.metrics` implementation For these metrics in `ignite.contrib.metrics` : - [Average Precision](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/average_precision.py) - [Precision Recall Curve](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/precision_recall_curve.py) - [Roc Auc](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/roc_auc.py) We need to make the implementation simpler. Similar to the one here #1690. New arg `device` will be added, and we need to add the necessary distributed tests as well. See here for the tests https://github.com/pytorch/ignite/blob/master/tests/ignite/contrib/metrics/regression/test_canberra_metric.py#L99 </issue> <code> [start of ignite/contrib/metrics/average_precision.py] 1 from typing import Callable 2 3 import torch 4 5 from ignite.metrics import EpochMetric 6 7 8 def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float: 9 try: 10 from sklearn.metrics import average_precision_score 11 except ImportError: 12 raise RuntimeError("This contrib module requires sklearn to be installed.") 13 14 y_true = y_targets.numpy() 15 y_pred = y_preds.numpy() 16 return average_precision_score(y_true, y_pred) 17 18 19 class AveragePrecision(EpochMetric): 20 """Computes Average Precision accumulating predictions and the ground-truth during an epoch 21 and applying `sklearn.metrics.average_precision_score <https://scikit-learn.org/stable/modules/generated/ 22 sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ . 23 24 Args: 25 output_transform: a callable that is used to transform the 26 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the 27 form expected by the metric. This can be useful if, for example, you have a multi-output model and 28 you want to compute the metric with respect to one of the outputs. 29 check_compute_fn: Default False. If True, `average_precision_score 30 <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html 31 #sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are 32 no issues. User will be warned in case there are any issues computing the function. 33 34 AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or 35 confidence values. To apply an activation to y_pred, use output_transform as shown below: 36 37 .. code-block:: python 38 39 def activated_output_transform(output): 40 y_pred, y = output 41 y_pred = torch.softmax(y_pred, dim=1) 42 return y_pred, y 43 44 avg_precision = AveragePrecision(activated_output_transform) 45 46 """ 47 48 def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None: 49 super(AveragePrecision, self).__init__( 50 average_precision_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn 51 ) 52 [end of ignite/contrib/metrics/average_precision.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py --- a/ignite/contrib/metrics/average_precision.py +++ b/ignite/contrib/metrics/average_precision.py @@ -1,4 +1,4 @@ -from typing import Callable +from typing import Callable, Optional, Union import torch @@ -6,13 +6,10 @@ def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float: - try: - from sklearn.metrics import average_precision_score - except ImportError: - raise RuntimeError("This contrib module requires sklearn to be installed.") + from sklearn.metrics import average_precision_score - y_true = y_targets.numpy() - y_pred = y_preds.numpy() + y_true = y_targets.cpu().numpy() + y_pred = y_preds.cpu().numpy() return average_precision_score(y_true, y_pred) @@ -30,6 +27,7 @@ <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html #sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are no issues. User will be warned in case there are any issues computing the function. + device: optional device specification for internal storage. AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below: @@ -45,7 +43,21 @@ """ - def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None: + def __init__( + self, + output_transform: Callable = lambda x: x, + check_compute_fn: bool = False, + device: Union[str, torch.device] = torch.device("cpu"), + ): + + try: + from sklearn.metrics import average_precision_score + except ImportError: + raise RuntimeError("This contrib module requires sklearn to be installed.") + super(AveragePrecision, self).__init__( - average_precision_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn + average_precision_compute_fn, + output_transform=output_transform, + check_compute_fn=check_compute_fn, + device=device, )
{"golden_diff": "diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py\n--- a/ignite/contrib/metrics/average_precision.py\n+++ b/ignite/contrib/metrics/average_precision.py\n@@ -1,4 +1,4 @@\n-from typing import Callable\n+from typing import Callable, Optional, Union\n \n import torch\n \n@@ -6,13 +6,10 @@\n \n \n def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:\n- try:\n- from sklearn.metrics import average_precision_score\n- except ImportError:\n- raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n+ from sklearn.metrics import average_precision_score\n \n- y_true = y_targets.numpy()\n- y_pred = y_preds.numpy()\n+ y_true = y_targets.cpu().numpy()\n+ y_pred = y_preds.cpu().numpy()\n return average_precision_score(y_true, y_pred)\n \n \n@@ -30,6 +27,7 @@\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html\n #sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n+ device: optional device specification for internal storage.\n \n AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or\n confidence values. To apply an activation to y_pred, use output_transform as shown below:\n@@ -45,7 +43,21 @@\n \n \"\"\"\n \n- def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:\n+ def __init__(\n+ self,\n+ output_transform: Callable = lambda x: x,\n+ check_compute_fn: bool = False,\n+ device: Union[str, torch.device] = torch.device(\"cpu\"),\n+ ):\n+\n+ try:\n+ from sklearn.metrics import average_precision_score\n+ except ImportError:\n+ raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n+\n super(AveragePrecision, self).__init__(\n- average_precision_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn\n+ average_precision_compute_fn,\n+ output_transform=output_transform,\n+ check_compute_fn=check_compute_fn,\n+ device=device,\n )\n", "issue": "Improve `ignite.contirb.metrics` implementation \nFor these metrics in `ignite.contrib.metrics` :\r\n- [Average Precision](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/average_precision.py)\r\n- [Precision Recall Curve](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/precision_recall_curve.py)\r\n- [Roc Auc](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/roc_auc.py) \r\nWe need to make the implementation simpler. Similar to the one here #1690. New arg `device` will be added, and we need to add the necessary distributed tests as well. See here for the tests https://github.com/pytorch/ignite/blob/master/tests/ignite/contrib/metrics/regression/test_canberra_metric.py#L99\r\n\n", "before_files": [{"content": "from typing import Callable\n\nimport torch\n\nfrom ignite.metrics import EpochMetric\n\n\ndef average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:\n try:\n from sklearn.metrics import average_precision_score\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n\n y_true = y_targets.numpy()\n y_pred = y_preds.numpy()\n return average_precision_score(y_true, y_pred)\n\n\nclass AveragePrecision(EpochMetric):\n \"\"\"Computes Average Precision accumulating predictions and the ground-truth during an epoch\n and applying `sklearn.metrics.average_precision_score <https://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ .\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: Default False. If True, `average_precision_score\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html\n #sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n\n AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or\n confidence values. To apply an activation to y_pred, use output_transform as shown below:\n\n .. code-block:: python\n\n def activated_output_transform(output):\n y_pred, y = output\n y_pred = torch.softmax(y_pred, dim=1)\n return y_pred, y\n\n avg_precision = AveragePrecision(activated_output_transform)\n\n \"\"\"\n\n def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:\n super(AveragePrecision, self).__init__(\n average_precision_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn\n )\n", "path": "ignite/contrib/metrics/average_precision.py"}]}
1,302
534
gh_patches_debug_37409
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2875
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider tacocabana is broken During the global build at 2021-05-26-14-42-23, spider **tacocabana** failed with **0 features** and **1 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tacocabana.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tacocabana.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tacocabana.geojson)) </issue> <code> [start of locations/spiders/tacocabana.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import json 4 import re 5 6 from locations.items import GeojsonPointItem 7 8 class TacocabanaSpider(scrapy.Spider): 9 name = "tacocabana" 10 item_attributes = { 'brand': "Taco Cabana" } 11 allowed_domains = ["www.tacocabana.com"] 12 start_urls = ( 13 "http://www.tacocabana.com/wp-admin/admin-ajax.php?action=get_ajax_processor&processor=get-locations&queryType=&postID=816", 14 ) 15 16 def parse(self, response): 17 data = json.loads(re.sub(r"\s<.*?>.*<.*?>\s", "", response.body_as_unicode())) 18 19 for store in data: 20 properties = { 21 "phone" : store["phone_number"], 22 "ref" : str(store["locator_store_number"]), 23 "name" : store["post_title"], 24 "opening_hours" : store["hours"], 25 "website" : store["permalink"], 26 "lat" : store["x_coordinate"], 27 "lon" : store["y_coordinate"], 28 "street" : store["street_address_1"] + store["street_address_2"], 29 "city" : store["city"], 30 "state" : store["state"], 31 "postcode" : store["zip_code"] 32 } 33 34 yield GeojsonPointItem(**properties) 35 36 else: 37 self.logger.info("No results") 38 [end of locations/spiders/tacocabana.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/tacocabana.py b/locations/spiders/tacocabana.py --- a/locations/spiders/tacocabana.py +++ b/locations/spiders/tacocabana.py @@ -1,37 +1,55 @@ # -*- coding: utf-8 -*- import scrapy -import json -import re from locations.items import GeojsonPointItem + class TacocabanaSpider(scrapy.Spider): name = "tacocabana" - item_attributes = { 'brand': "Taco Cabana" } - allowed_domains = ["www.tacocabana.com"] - start_urls = ( - "http://www.tacocabana.com/wp-admin/admin-ajax.php?action=get_ajax_processor&processor=get-locations&queryType=&postID=816", - ) - + item_attributes = {"brand": "Taco Cabana"} + allowed_domains = ["api.koala.fuzzhq.com"] + + def start_requests(self): + yield scrapy.http.JsonRequest( + "https://api.koala.fuzzhq.com/oauth/access_token", + data={ + "client_id": "3nA4STkGif0fZGApqxMlVewy3h8HN6Fsy7jVOACP", + "client_secret": "8oBU5gWiNg04zYzz61hN3ETrTIzvmbGyeLCX0F1s", + "grant_type": "ordering_app_credentials", + "scope": "group:ordering_app", + }, + callback=self.fetch_locations, + ) + + def fetch_locations(self, response): + self.access_token = response.json()["access_token"] + yield self.request( + "https://api.koala.fuzzhq.com/v1/ordering/store-locations/?include[]=operating_hours&include[]=attributes&per_page=50" + ) + + def request(self, url): + return scrapy.Request( + url, headers={"Authorization": f"Bearer {self.access_token}"} + ) + def parse(self, response): - data = json.loads(re.sub(r"\s<.*?>.*<.*?>\s", "", response.body_as_unicode())) + data = response.json() - for store in data: + for store in data["data"]: properties = { - "phone" : store["phone_number"], - "ref" : str(store["locator_store_number"]), - "name" : store["post_title"], - "opening_hours" : store["hours"], - "website" : store["permalink"], - "lat" : store["x_coordinate"], - "lon" : store["y_coordinate"], - "street" : store["street_address_1"] + store["street_address_2"], - "city" : store["city"], - "state" : store["state"], - "postcode" : store["zip_code"] + "website": f'https://olo.tacocabana.com/menu/{store["slug"]}?showInfoModal=true', + "ref": store["brand_id"], + "lat": store["latitude"], + "lon": store["longitude"], + "addr_full": store["street_address"], + "city": store["city"], + "state": store["cached_data"]["state"], + "country": store["country"], + "postcode": store["zip_code"], + "phone": store["phone_number"], } - yield GeojsonPointItem(**properties) - - else: - self.logger.info("No results") + + next_url = data["meta"]["pagination"]["links"]["next"] + if next_url: + yield self.request(next_url)
{"golden_diff": "diff --git a/locations/spiders/tacocabana.py b/locations/spiders/tacocabana.py\n--- a/locations/spiders/tacocabana.py\n+++ b/locations/spiders/tacocabana.py\n@@ -1,37 +1,55 @@\n # -*- coding: utf-8 -*-\n import scrapy\n-import json\n-import re\n \n from locations.items import GeojsonPointItem\n \n+\n class TacocabanaSpider(scrapy.Spider):\n name = \"tacocabana\"\n- item_attributes = { 'brand': \"Taco Cabana\" }\n- allowed_domains = [\"www.tacocabana.com\"]\n- start_urls = (\n- \"http://www.tacocabana.com/wp-admin/admin-ajax.php?action=get_ajax_processor&processor=get-locations&queryType=&postID=816\",\n- )\n- \n+ item_attributes = {\"brand\": \"Taco Cabana\"}\n+ allowed_domains = [\"api.koala.fuzzhq.com\"]\n+\n+ def start_requests(self):\n+ yield scrapy.http.JsonRequest(\n+ \"https://api.koala.fuzzhq.com/oauth/access_token\",\n+ data={\n+ \"client_id\": \"3nA4STkGif0fZGApqxMlVewy3h8HN6Fsy7jVOACP\",\n+ \"client_secret\": \"8oBU5gWiNg04zYzz61hN3ETrTIzvmbGyeLCX0F1s\",\n+ \"grant_type\": \"ordering_app_credentials\",\n+ \"scope\": \"group:ordering_app\",\n+ },\n+ callback=self.fetch_locations,\n+ )\n+\n+ def fetch_locations(self, response):\n+ self.access_token = response.json()[\"access_token\"]\n+ yield self.request(\n+ \"https://api.koala.fuzzhq.com/v1/ordering/store-locations/?include[]=operating_hours&include[]=attributes&per_page=50\"\n+ )\n+\n+ def request(self, url):\n+ return scrapy.Request(\n+ url, headers={\"Authorization\": f\"Bearer {self.access_token}\"}\n+ )\n+\n def parse(self, response):\n- data = json.loads(re.sub(r\"\\s<.*?>.*<.*?>\\s\", \"\", response.body_as_unicode()))\n+ data = response.json()\n \n- for store in data:\n+ for store in data[\"data\"]:\n properties = {\n- \"phone\" : store[\"phone_number\"],\n- \"ref\" : str(store[\"locator_store_number\"]),\n- \"name\" : store[\"post_title\"],\n- \"opening_hours\" : store[\"hours\"],\n- \"website\" : store[\"permalink\"],\n- \"lat\" : store[\"x_coordinate\"],\n- \"lon\" : store[\"y_coordinate\"],\n- \"street\" : store[\"street_address_1\"] + store[\"street_address_2\"],\n- \"city\" : store[\"city\"],\n- \"state\" : store[\"state\"],\n- \"postcode\" : store[\"zip_code\"]\n+ \"website\": f'https://olo.tacocabana.com/menu/{store[\"slug\"]}?showInfoModal=true',\n+ \"ref\": store[\"brand_id\"],\n+ \"lat\": store[\"latitude\"],\n+ \"lon\": store[\"longitude\"],\n+ \"addr_full\": store[\"street_address\"],\n+ \"city\": store[\"city\"],\n+ \"state\": store[\"cached_data\"][\"state\"],\n+ \"country\": store[\"country\"],\n+ \"postcode\": store[\"zip_code\"],\n+ \"phone\": store[\"phone_number\"],\n }\n- \n yield GeojsonPointItem(**properties)\n- \n- else:\n- self.logger.info(\"No results\")\n+\n+ next_url = data[\"meta\"][\"pagination\"][\"links\"][\"next\"]\n+ if next_url:\n+ yield self.request(next_url)\n", "issue": "Spider tacocabana is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tacocabana** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tacocabana.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tacocabana.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tacocabana.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\nclass TacocabanaSpider(scrapy.Spider):\n name = \"tacocabana\"\n item_attributes = { 'brand': \"Taco Cabana\" }\n allowed_domains = [\"www.tacocabana.com\"]\n start_urls = (\n \"http://www.tacocabana.com/wp-admin/admin-ajax.php?action=get_ajax_processor&processor=get-locations&queryType=&postID=816\",\n )\n \n def parse(self, response):\n data = json.loads(re.sub(r\"\\s<.*?>.*<.*?>\\s\", \"\", response.body_as_unicode()))\n\n for store in data:\n properties = {\n \"phone\" : store[\"phone_number\"],\n \"ref\" : str(store[\"locator_store_number\"]),\n \"name\" : store[\"post_title\"],\n \"opening_hours\" : store[\"hours\"],\n \"website\" : store[\"permalink\"],\n \"lat\" : store[\"x_coordinate\"],\n \"lon\" : store[\"y_coordinate\"],\n \"street\" : store[\"street_address_1\"] + store[\"street_address_2\"],\n \"city\" : store[\"city\"],\n \"state\" : store[\"state\"],\n \"postcode\" : store[\"zip_code\"]\n }\n \n yield GeojsonPointItem(**properties)\n \n else:\n self.logger.info(\"No results\")\n", "path": "locations/spiders/tacocabana.py"}]}
1,112
840
gh_patches_debug_2630
rasdani/github-patches
git_diff
joke2k__faker-826
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pt_BR email not returning valid email addresses When creating a fake Factory with the pt_BR it is not returning valid email addresses. Example: ``` melocauã@bol.com.br joã[email protected] laví[email protected] vitó[email protected] ``` </issue> <code> [start of faker/providers/internet/pt_BR/__init__.py] 1 # coding=utf-8 2 from __future__ import unicode_literals 3 from .. import Provider as InternetProvider 4 5 6 class Provider(InternetProvider): 7 safe_email_tlds = ('com', 'net', 'br', 'br') 8 free_email_domains = ( 9 'gmail.com', 10 'hotmail.com', 11 'yahoo.com.br', 12 'uol.com.br', 13 'bol.com.br', 14 'ig.com.br') 15 tlds = ('com', 'com', 'com', 'net', 'org', 'br', 'br', 'br') 16 [end of faker/providers/internet/pt_BR/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/providers/internet/pt_BR/__init__.py b/faker/providers/internet/pt_BR/__init__.py --- a/faker/providers/internet/pt_BR/__init__.py +++ b/faker/providers/internet/pt_BR/__init__.py @@ -13,3 +13,11 @@ 'bol.com.br', 'ig.com.br') tlds = ('com', 'com', 'com', 'net', 'org', 'br', 'br', 'br') + replacements = ( + ('à', 'a'), ('â', 'a'), ('ã', 'a'), + ('ç', 'c'), + ('é', 'e'), ('ê', 'e'), + ('í', 'i'), + ('ô', 'o'), ('ö', 'o'), ('õ', 'o'), + ('ú', 'u'), + )
{"golden_diff": "diff --git a/faker/providers/internet/pt_BR/__init__.py b/faker/providers/internet/pt_BR/__init__.py\n--- a/faker/providers/internet/pt_BR/__init__.py\n+++ b/faker/providers/internet/pt_BR/__init__.py\n@@ -13,3 +13,11 @@\n 'bol.com.br',\n 'ig.com.br')\n tlds = ('com', 'com', 'com', 'net', 'org', 'br', 'br', 'br')\n+ replacements = (\n+ ('\u00e0', 'a'), ('\u00e2', 'a'), ('\u00e3', 'a'),\n+ ('\u00e7', 'c'),\n+ ('\u00e9', 'e'), ('\u00ea', 'e'),\n+ ('\u00ed', 'i'),\n+ ('\u00f4', 'o'), ('\u00f6', 'o'), ('\u00f5', 'o'),\n+ ('\u00fa', 'u'),\n+ )\n", "issue": "pt_BR email not returning valid email addresses\nWhen creating a fake Factory with the pt_BR it is not returning valid email addresses.\r\nExample:\r\n```\r\nmelocau\[email protected]\r\njo\[email protected]\r\nlav\[email protected]\r\nvit\[email protected]\r\n```\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\n\n\nclass Provider(InternetProvider):\n safe_email_tlds = ('com', 'net', 'br', 'br')\n free_email_domains = (\n 'gmail.com',\n 'hotmail.com',\n 'yahoo.com.br',\n 'uol.com.br',\n 'bol.com.br',\n 'ig.com.br')\n tlds = ('com', 'com', 'com', 'net', 'org', 'br', 'br', 'br')\n", "path": "faker/providers/internet/pt_BR/__init__.py"}]}
760
200
gh_patches_debug_8784
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-632
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> get_last_root_page template tag not working ### Describe the Bug <!-- A clear and concise description of what the bug is. --> The `get_last_root_page` template tag is not working: https://github.com/Integreat/integreat-cms/blob/develop/src/cms/templatetags/page_filters.py#L10-L20 This is because in the page QuerySet aggregation, the difference() function is used: https://github.com/Integreat/integreat-cms/blob/a285cb3c74b2a2c501147076338e2b2a70c89bd6/src/cms/models/regions/region.py#L177 After difference(), it is not possible to use filter(), see https://docs.djangoproject.com/en/2.2/ref/models/querysets/#union This leads to the last drop-region in the page tree being incorrectly tied to the last page in the list, not necessarily the last root page. ### Steps to Reproduce 1. Go to Page tree 2. Drag & drop page to most bottom line ### Expected Behavior <!-- A clear and concise description of what you expected to happen. --> The page should be placed next to the last root page ### Actual Behavior <!-- A clear and concise description of what actually happened. --> The page is placed next to the last page in the list ### Additional Information <!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. --> </issue> <code> [start of src/cms/templatetags/page_filters.py] 1 """ 2 This is a collection of tags and filters for :class:`~cms.models.pages.page.Page` objects. 3 """ 4 from django import template 5 6 register = template.Library() 7 8 9 @register.simple_tag 10 def get_last_root_page(pages): 11 """ 12 This tag returns the last page on the root level. 13 14 :param pages: The requested page tree 15 :type pages: ~mptt.querysets.TreeQuerySet [ ~cms.models.pages.page.Page ] 16 17 :return: The last root page of the given :class:`~django.db.models.query.QuerySet` 18 :rtype: ~cms.models.pages.page.Page 19 """ 20 return pages.filter(parent=None).last() 21 [end of src/cms/templatetags/page_filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cms/templatetags/page_filters.py b/src/cms/templatetags/page_filters.py --- a/src/cms/templatetags/page_filters.py +++ b/src/cms/templatetags/page_filters.py @@ -12,9 +12,9 @@ This tag returns the last page on the root level. :param pages: The requested page tree - :type pages: ~mptt.querysets.TreeQuerySet [ ~cms.models.pages.page.Page ] + :type pages: list [ ~cms.models.pages.page.Page ] - :return: The last root page of the given :class:`~django.db.models.query.QuerySet` + :return: The last root page of the given page list :rtype: ~cms.models.pages.page.Page """ - return pages.filter(parent=None).last() + return list(filter(lambda p: not p.parent, pages))[-1]
{"golden_diff": "diff --git a/src/cms/templatetags/page_filters.py b/src/cms/templatetags/page_filters.py\n--- a/src/cms/templatetags/page_filters.py\n+++ b/src/cms/templatetags/page_filters.py\n@@ -12,9 +12,9 @@\n This tag returns the last page on the root level.\n \n :param pages: The requested page tree\n- :type pages: ~mptt.querysets.TreeQuerySet [ ~cms.models.pages.page.Page ]\n+ :type pages: list [ ~cms.models.pages.page.Page ]\n \n- :return: The last root page of the given :class:`~django.db.models.query.QuerySet`\n+ :return: The last root page of the given page list\n :rtype: ~cms.models.pages.page.Page\n \"\"\"\n- return pages.filter(parent=None).last()\n+ return list(filter(lambda p: not p.parent, pages))[-1]\n", "issue": "get_last_root_page template tag not working\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe `get_last_root_page` template tag is not working:\r\nhttps://github.com/Integreat/integreat-cms/blob/develop/src/cms/templatetags/page_filters.py#L10-L20\r\n\r\nThis is because in the page QuerySet aggregation, the difference() function is used:\r\nhttps://github.com/Integreat/integreat-cms/blob/a285cb3c74b2a2c501147076338e2b2a70c89bd6/src/cms/models/regions/region.py#L177\r\n\r\nAfter difference(), it is not possible to use filter(), see https://docs.djangoproject.com/en/2.2/ref/models/querysets/#union\r\n\r\nThis leads to the last drop-region in the page tree being incorrectly tied to the last page in the list, not necessarily the last root page.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to Page tree\r\n2. Drag & drop page to most bottom line\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe page should be placed next to the last root page\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nThe page is placed next to the last page in the list\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis is a collection of tags and filters for :class:`~cms.models.pages.page.Page` objects.\n\"\"\"\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_last_root_page(pages):\n \"\"\"\n This tag returns the last page on the root level.\n\n :param pages: The requested page tree\n :type pages: ~mptt.querysets.TreeQuerySet [ ~cms.models.pages.page.Page ]\n\n :return: The last root page of the given :class:`~django.db.models.query.QuerySet`\n :rtype: ~cms.models.pages.page.Page\n \"\"\"\n return pages.filter(parent=None).last()\n", "path": "src/cms/templatetags/page_filters.py"}]}
1,042
204
gh_patches_debug_21667
rasdani/github-patches
git_diff
fedora-infra__bodhi-2005
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bodhi-dequqe-stable dies if any update in the queue is no longer eligible to go stable QuLogic from Freenode reported today that batched updates didn't go stable at 03:00 UTC like they should have. I confirmed that the cron job ran, but I didn't see any notes about its output. I then ran the command by hand and received this output: ``` [bowlofeggs@bodhi-backend01 ~][PROD]$ sudo -u apache /usr/bin/bodhi-dequeue-stable No handlers could be found for logger "bodhi.server" This update has not yet met the minimum testing requirements defined in the <a href="https://fedoraproject.org/wiki/Package_update_acceptance_criteria">Package Update Acceptance Criteria</a> ``` The [```dequeue_stable()```](https://github.com/fedora-infra/bodhi/blob/3.0.0/bodhi/server/scripts/dequeue_stable.py#L28-L46) function runs a large transaction with only a single try/except. It seems that some update in the queue no longer meets testing requirements (probably due to receiving a -1 karma after going to batched) and is raising an Exception when the tool attempts to mark it for stable. Since there is only one try/except handler, this causes the whole transaction to be rolled back. It should be easy to fix this - we just need a try/except around each update. Thanks to QuLogic from Freenode for reporting this issue to me. </issue> <code> [start of bodhi/server/scripts/dequeue_stable.py] 1 # -*- coding: utf-8 -*- 2 # Copyright © 2017 Caleigh Runge-Hottman 3 # 4 # This file is part of Bodhi. 5 # 6 # This program is free software; you can redistribute it and/or 7 # modify it under the terms of the GNU General Public License 8 # as published by the Free Software Foundation; either version 2 9 # of the License, or (at your option) any later version. 10 # 11 # This program is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with this program; if not, write to the Free Software 18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 19 """This script is responsible for moving all updates with a batched request to a stable request.""" 20 21 import sys 22 23 import click 24 25 from bodhi.server import buildsys, config, models, Session, initialize_db 26 27 28 @click.command() 29 @click.version_option(message='%(version)s') 30 def dequeue_stable(): 31 """Convert all batched requests to stable requests.""" 32 initialize_db(config.config) 33 buildsys.setup_buildsystem(config.config) 34 db = Session() 35 36 try: 37 batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all() 38 for update in batched: 39 update.set_request(db, models.UpdateRequest.stable, u'bodhi') 40 db.commit() 41 42 except Exception as e: 43 print(str(e)) 44 db.rollback() 45 Session.remove() 46 sys.exit(1) 47 [end of bodhi/server/scripts/dequeue_stable.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bodhi/server/scripts/dequeue_stable.py b/bodhi/server/scripts/dequeue_stable.py --- a/bodhi/server/scripts/dequeue_stable.py +++ b/bodhi/server/scripts/dequeue_stable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright © 2017 Caleigh Runge-Hottman +# Copyright © 2017 Caleigh Runge-Hottman and Red Hat, Inc. # # This file is part of Bodhi. # @@ -36,11 +36,17 @@ try: batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all() for update in batched: - update.set_request(db, models.UpdateRequest.stable, u'bodhi') - db.commit() - + try: + update.set_request(db, models.UpdateRequest.stable, u'bodhi') + db.commit() + except Exception as e: + print('Unable to stabilize {}: {}'.format(update.alias, str(e))) + db.rollback() + msg = u"Bodhi is unable to request this update for stabilization: {}" + update.comment(db, msg.format(str(e)), author=u'bodhi') + db.commit() except Exception as e: print(str(e)) - db.rollback() - Session.remove() sys.exit(1) + finally: + Session.remove()
{"golden_diff": "diff --git a/bodhi/server/scripts/dequeue_stable.py b/bodhi/server/scripts/dequeue_stable.py\n--- a/bodhi/server/scripts/dequeue_stable.py\n+++ b/bodhi/server/scripts/dequeue_stable.py\n@@ -1,5 +1,5 @@\n # -*- coding: utf-8 -*-\n-# Copyright \u00a9 2017 Caleigh Runge-Hottman\n+# Copyright \u00a9 2017 Caleigh Runge-Hottman and Red Hat, Inc.\n #\n # This file is part of Bodhi.\n #\n@@ -36,11 +36,17 @@\n try:\n batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all()\n for update in batched:\n- update.set_request(db, models.UpdateRequest.stable, u'bodhi')\n- db.commit()\n-\n+ try:\n+ update.set_request(db, models.UpdateRequest.stable, u'bodhi')\n+ db.commit()\n+ except Exception as e:\n+ print('Unable to stabilize {}: {}'.format(update.alias, str(e)))\n+ db.rollback()\n+ msg = u\"Bodhi is unable to request this update for stabilization: {}\"\n+ update.comment(db, msg.format(str(e)), author=u'bodhi')\n+ db.commit()\n except Exception as e:\n print(str(e))\n- db.rollback()\n- Session.remove()\n sys.exit(1)\n+ finally:\n+ Session.remove()\n", "issue": "bodhi-dequqe-stable dies if any update in the queue is no longer eligible to go stable\nQuLogic from Freenode reported today that batched updates didn't go stable at 03:00 UTC like they should have. I confirmed that the cron job ran, but I didn't see any notes about its output. I then ran the command by hand and received this output:\r\n\r\n```\r\n[bowlofeggs@bodhi-backend01 ~][PROD]$ sudo -u apache /usr/bin/bodhi-dequeue-stable\r\nNo handlers could be found for logger \"bodhi.server\"\r\nThis update has not yet met the minimum testing requirements defined in the <a href=\"https://fedoraproject.org/wiki/Package_update_acceptance_criteria\">Package Update Acceptance Criteria</a>\r\n```\r\n\r\nThe [```dequeue_stable()```](https://github.com/fedora-infra/bodhi/blob/3.0.0/bodhi/server/scripts/dequeue_stable.py#L28-L46) function runs a large transaction with only a single try/except. It seems that some update in the queue no longer meets testing requirements (probably due to receiving a -1 karma after going to batched) and is raising an Exception when the tool attempts to mark it for stable. Since there is only one try/except handler, this causes the whole transaction to be rolled back.\r\n\r\nIt should be easy to fix this - we just need a try/except around each update.\r\n\r\nThanks to QuLogic from Freenode for reporting this issue to me.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2017 Caleigh Runge-Hottman\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"This script is responsible for moving all updates with a batched request to a stable request.\"\"\"\n\nimport sys\n\nimport click\n\nfrom bodhi.server import buildsys, config, models, Session, initialize_db\n\n\[email protected]()\[email protected]_option(message='%(version)s')\ndef dequeue_stable():\n \"\"\"Convert all batched requests to stable requests.\"\"\"\n initialize_db(config.config)\n buildsys.setup_buildsystem(config.config)\n db = Session()\n\n try:\n batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all()\n for update in batched:\n update.set_request(db, models.UpdateRequest.stable, u'bodhi')\n db.commit()\n\n except Exception as e:\n print(str(e))\n db.rollback()\n Session.remove()\n sys.exit(1)\n", "path": "bodhi/server/scripts/dequeue_stable.py"}]}
1,337
320
gh_patches_debug_4008
rasdani/github-patches
git_diff
zestedesavoir__zds-site-2705
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Derniers sujets sur la Home : la date sur mobile n'est pas optimisée Sur mobile on à en général pas beaucoup de place. Et il faudrait éviter d'afficher la date literralle pour optimiser la place. Cf screen (paysage). ![13 mai 2015 181424 gmt 0200](https://cloud.githubusercontent.com/assets/6066015/7615213/59898462-f99c-11e4-866d-a136e7446c5b.jpg) </issue> <code> [start of zds/featured/forms.py] 1 # coding: utf-8 2 from crispy_forms.bootstrap import StrictButton 3 from crispy_forms.helper import FormHelper 4 from crispy_forms.layout import Layout, Field, ButtonHolder 5 from django import forms 6 from django.core.urlresolvers import reverse 7 from django.utils.translation import ugettext_lazy as _ 8 9 from zds.featured.models import FeaturedResource, FeaturedMessage 10 11 12 class FeaturedResourceForm(forms.ModelForm): 13 class Meta: 14 model = FeaturedResource 15 16 fields = ['title', 'type', 'authors', 'image_url', 'url'] 17 18 title = forms.CharField( 19 label=_(u'Titre'), 20 max_length=FeaturedResource._meta.get_field('title').max_length, 21 widget=forms.TextInput( 22 attrs={ 23 'required': 'required', 24 } 25 ) 26 ) 27 28 type = forms.CharField( 29 label=_(u'Type'), 30 max_length=FeaturedResource._meta.get_field('type').max_length, 31 widget=forms.TextInput( 32 attrs={ 33 'placeholder': _(u'ex: Un projet, un article, un tutoriel...'), 34 'required': 'required', 35 } 36 ) 37 ) 38 39 authors = forms.CharField( 40 label=_('Auteurs'), 41 widget=forms.TextInput( 42 attrs={ 43 'placeholder': _(u'Les auteurs doivent être séparés par une virgule.'), 44 'required': 'required', 45 'data-autocomplete': '{ "type": "multiple" }' 46 } 47 ) 48 ) 49 50 image_url = forms.CharField( 51 label='Image URL', 52 max_length=FeaturedResource._meta.get_field('image_url').max_length, 53 widget=forms.TextInput( 54 attrs={ 55 'placeholder': _(u'Lien vers l\'url de l\'image de la une.') 56 } 57 ) 58 ) 59 60 url = forms.CharField( 61 label='URL', 62 max_length=FeaturedResource._meta.get_field('url').max_length, 63 widget=forms.TextInput( 64 attrs={ 65 'placeholder': _(u'Lien vers l\'url de la ressource.') 66 } 67 ) 68 ) 69 70 def __init__(self, *args, **kwargs): 71 super(FeaturedResourceForm, self).__init__(*args, **kwargs) 72 self.helper = FormHelper() 73 self.helper.form_class = 'content-wrapper' 74 self.helper.form_method = 'post' 75 self.helper.form_action = reverse('featured-resource-create') 76 77 self.helper.layout = Layout( 78 Field('title'), 79 Field('type'), 80 Field('authors'), 81 Field('image_url'), 82 Field('url'), 83 ButtonHolder( 84 StrictButton(_(u'Enregistrer'), type='submit'), 85 ), 86 ) 87 88 89 class FeaturedMessageForm(forms.ModelForm): 90 class Meta: 91 model = FeaturedMessage 92 93 fields = ['message', 'url'] 94 95 message = forms.CharField( 96 label=_(u'Message'), 97 max_length=FeaturedMessage._meta.get_field('message').max_length, 98 widget=forms.TextInput( 99 attrs={ 100 'required': 'required', 101 } 102 ) 103 ) 104 105 url = forms.CharField( 106 label=_(u'URL'), 107 max_length=FeaturedMessage._meta.get_field('url').max_length, 108 widget=forms.TextInput( 109 attrs={ 110 'placeholder': _(u'Lien vers l\'url du message.'), 111 'required': 'required', 112 } 113 ) 114 ) 115 116 def __init__(self, *args, **kwargs): 117 super(FeaturedMessageForm, self).__init__(*args, **kwargs) 118 self.helper = FormHelper() 119 self.helper.form_class = 'content-wrapper' 120 self.helper.form_method = 'post' 121 self.helper.form_action = reverse('featured-message-create') 122 123 self.helper.layout = Layout( 124 Field('message'), 125 Field('url'), 126 ButtonHolder( 127 StrictButton(_(u'Enregistrer'), type='submit'), 128 ), 129 ) 130 [end of zds/featured/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/featured/forms.py b/zds/featured/forms.py --- a/zds/featured/forms.py +++ b/zds/featured/forms.py @@ -52,7 +52,7 @@ max_length=FeaturedResource._meta.get_field('image_url').max_length, widget=forms.TextInput( attrs={ - 'placeholder': _(u'Lien vers l\'url de l\'image de la une.') + 'placeholder': _(u'Lien vers l\'url de l\'image de la une (dimensions: 228x228).') } ) )
{"golden_diff": "diff --git a/zds/featured/forms.py b/zds/featured/forms.py\n--- a/zds/featured/forms.py\n+++ b/zds/featured/forms.py\n@@ -52,7 +52,7 @@\n max_length=FeaturedResource._meta.get_field('image_url').max_length,\n widget=forms.TextInput(\n attrs={\n- 'placeholder': _(u'Lien vers l\\'url de l\\'image de la une.')\n+ 'placeholder': _(u'Lien vers l\\'url de l\\'image de la une (dimensions: 228x228).')\n }\n )\n )\n", "issue": "Derniers sujets sur la Home : la date sur mobile n'est pas optimis\u00e9e \nSur mobile on \u00e0 en g\u00e9n\u00e9ral pas beaucoup de place. Et il faudrait \u00e9viter d'afficher la date literralle pour optimiser la place. Cf screen (paysage).\n\n![13 mai 2015 181424 gmt 0200](https://cloud.githubusercontent.com/assets/6066015/7615213/59898462-f99c-11e4-866d-a136e7446c5b.jpg)\n\n", "before_files": [{"content": "# coding: utf-8\nfrom crispy_forms.bootstrap import StrictButton\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Field, ButtonHolder\nfrom django import forms\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom zds.featured.models import FeaturedResource, FeaturedMessage\n\n\nclass FeaturedResourceForm(forms.ModelForm):\n class Meta:\n model = FeaturedResource\n\n fields = ['title', 'type', 'authors', 'image_url', 'url']\n\n title = forms.CharField(\n label=_(u'Titre'),\n max_length=FeaturedResource._meta.get_field('title').max_length,\n widget=forms.TextInput(\n attrs={\n 'required': 'required',\n }\n )\n )\n\n type = forms.CharField(\n label=_(u'Type'),\n max_length=FeaturedResource._meta.get_field('type').max_length,\n widget=forms.TextInput(\n attrs={\n 'placeholder': _(u'ex: Un projet, un article, un tutoriel...'),\n 'required': 'required',\n }\n )\n )\n\n authors = forms.CharField(\n label=_('Auteurs'),\n widget=forms.TextInput(\n attrs={\n 'placeholder': _(u'Les auteurs doivent \u00eatre s\u00e9par\u00e9s par une virgule.'),\n 'required': 'required',\n 'data-autocomplete': '{ \"type\": \"multiple\" }'\n }\n )\n )\n\n image_url = forms.CharField(\n label='Image URL',\n max_length=FeaturedResource._meta.get_field('image_url').max_length,\n widget=forms.TextInput(\n attrs={\n 'placeholder': _(u'Lien vers l\\'url de l\\'image de la une.')\n }\n )\n )\n\n url = forms.CharField(\n label='URL',\n max_length=FeaturedResource._meta.get_field('url').max_length,\n widget=forms.TextInput(\n attrs={\n 'placeholder': _(u'Lien vers l\\'url de la ressource.')\n }\n )\n )\n\n def __init__(self, *args, **kwargs):\n super(FeaturedResourceForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'content-wrapper'\n self.helper.form_method = 'post'\n self.helper.form_action = reverse('featured-resource-create')\n\n self.helper.layout = Layout(\n Field('title'),\n Field('type'),\n Field('authors'),\n Field('image_url'),\n Field('url'),\n ButtonHolder(\n StrictButton(_(u'Enregistrer'), type='submit'),\n ),\n )\n\n\nclass FeaturedMessageForm(forms.ModelForm):\n class Meta:\n model = FeaturedMessage\n\n fields = ['message', 'url']\n\n message = forms.CharField(\n label=_(u'Message'),\n max_length=FeaturedMessage._meta.get_field('message').max_length,\n widget=forms.TextInput(\n attrs={\n 'required': 'required',\n }\n )\n )\n\n url = forms.CharField(\n label=_(u'URL'),\n max_length=FeaturedMessage._meta.get_field('url').max_length,\n widget=forms.TextInput(\n attrs={\n 'placeholder': _(u'Lien vers l\\'url du message.'),\n 'required': 'required',\n }\n )\n )\n\n def __init__(self, *args, **kwargs):\n super(FeaturedMessageForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'content-wrapper'\n self.helper.form_method = 'post'\n self.helper.form_action = reverse('featured-message-create')\n\n self.helper.layout = Layout(\n Field('message'),\n Field('url'),\n ButtonHolder(\n StrictButton(_(u'Enregistrer'), type='submit'),\n ),\n )\n", "path": "zds/featured/forms.py"}]}
1,773
134
gh_patches_debug_7194
rasdani/github-patches
git_diff
instadeepai__Mava-1041
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG]: Flasbax bug on LBF (type issue) ### Describe the bug In the AgentID wrapper, the new_agents_view type is not enforced to be consistent, so for LBF with flashbax a dtype error emerges. ### To Reproduce Steps to reproduce the behavior: 1. Run LBF with flashbax. ### Expected behavior Expected the observation to be added easily to the buffer. ### Context (Environment) - Updated jumanji ### Additional context This is somewhat an exercise in opening an issue! ### Possible Solution Will make a PR soon! Basically a cast to the agents_view type. </issue> <code> [start of mava/wrappers/observation.py] 1 # Copyright 2022 InstaDeep Ltd. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Tuple, Union 16 17 import chex 18 import jax.numpy as jnp 19 from jumanji import specs 20 from jumanji.env import Environment 21 from jumanji.types import TimeStep 22 from jumanji.wrappers import Wrapper 23 24 from mava.types import Observation, ObservationGlobalState, State 25 26 27 class AgentIDWrapper(Wrapper): 28 """A wrapper to add a one-hot vector as agent IDs to the original observation. 29 It can be useful in multi-agent environments where agents require unique identification. 30 """ 31 32 def __init__(self, env: Environment): 33 super().__init__(env) 34 35 def _add_agent_ids( 36 self, timestep: TimeStep, num_agents: int 37 ) -> Union[Observation, ObservationGlobalState]: 38 """Adds agent IDs to the observation.""" 39 obs = timestep.observation 40 agent_ids = jnp.eye(num_agents) 41 agents_view = jnp.concatenate([agent_ids, obs.agents_view], axis=-1) 42 43 return obs._replace(agents_view=agents_view) # type: ignore 44 45 def reset(self, key: chex.PRNGKey) -> Tuple[State, TimeStep]: 46 """Reset the environment.""" 47 state, timestep = self._env.reset(key) 48 timestep.observation = self._add_agent_ids(timestep, self._env.num_agents) 49 50 return state, timestep 51 52 def step( 53 self, 54 state: State, 55 action: chex.Array, 56 ) -> Tuple[State, TimeStep]: 57 """Step the environment.""" 58 state, timestep = self._env.step(state, action) 59 timestep.observation = self._add_agent_ids(timestep, self._env.num_agents) 60 61 return state, timestep 62 63 def observation_spec( 64 self, 65 ) -> Union[specs.Spec[Observation], specs.Spec[ObservationGlobalState]]: 66 """Specification of the observation of the selected environment.""" 67 obs_spec = self._env.observation_spec() 68 num_obs_features = obs_spec.agents_view.shape[-1] + self._env.num_agents 69 dtype = obs_spec.agents_view.dtype 70 agents_view = specs.Array((self._env.num_agents, num_obs_features), dtype, "agents_view") 71 72 return obs_spec.replace(agents_view=agents_view) 73 74 75 class GlobalStateWrapper(Wrapper): 76 """Wrapper for adding global state to an environment that follows the mava API. 77 78 The wrapper includes a global environment state to be used by the centralised critic. 79 Note here that since most environments do not have a global state, we create one 80 by concatenating the observations of all agents. 81 """ 82 83 def modify_timestep(self, timestep: TimeStep) -> TimeStep[ObservationGlobalState]: 84 global_state = jnp.concatenate(timestep.observation.agents_view, axis=0) 85 global_state = jnp.tile(global_state, (self._env.num_agents, 1)) 86 87 observation = ObservationGlobalState( 88 global_state=global_state, 89 agents_view=timestep.observation.agents_view, 90 action_mask=timestep.observation.action_mask, 91 step_count=timestep.observation.step_count, 92 ) 93 94 return timestep.replace(observation=observation) 95 96 def reset(self, key: chex.PRNGKey) -> Tuple[State, TimeStep]: 97 """Reset the environment. Updates the step count.""" 98 state, timestep = self._env.reset(key) 99 return state, self.modify_timestep(timestep) 100 101 def step(self, state: State, action: chex.Array) -> Tuple[State, TimeStep]: 102 """Step the environment. Updates the step count.""" 103 state, timestep = self._env.step(state, action) 104 return state, self.modify_timestep(timestep) 105 106 def observation_spec(self) -> specs.Spec[ObservationGlobalState]: 107 """Specification of the observation of the selected environment.""" 108 109 obs_spec = self._env.observation_spec() 110 num_obs_features = obs_spec.agents_view.shape[-1] 111 global_state = specs.Array( 112 (self._env.num_agents, self._env.num_agents * num_obs_features), 113 obs_spec.agents_view.dtype, 114 "global_state", 115 ) 116 117 return specs.Spec( 118 ObservationGlobalState, 119 "ObservationSpec", 120 agents_view=obs_spec.agents_view, 121 action_mask=obs_spec.action_mask, 122 global_state=global_state, 123 step_count=obs_spec.step_count, 124 ) 125 [end of mava/wrappers/observation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mava/wrappers/observation.py b/mava/wrappers/observation.py --- a/mava/wrappers/observation.py +++ b/mava/wrappers/observation.py @@ -38,7 +38,11 @@ """Adds agent IDs to the observation.""" obs = timestep.observation agent_ids = jnp.eye(num_agents) - agents_view = jnp.concatenate([agent_ids, obs.agents_view], axis=-1) + agents_view = jnp.concatenate( + [agent_ids, obs.agents_view], + axis=-1, + dtype=obs.agents_view.dtype, + ) return obs._replace(agents_view=agents_view) # type: ignore
{"golden_diff": "diff --git a/mava/wrappers/observation.py b/mava/wrappers/observation.py\n--- a/mava/wrappers/observation.py\n+++ b/mava/wrappers/observation.py\n@@ -38,7 +38,11 @@\n \"\"\"Adds agent IDs to the observation.\"\"\"\n obs = timestep.observation\n agent_ids = jnp.eye(num_agents)\n- agents_view = jnp.concatenate([agent_ids, obs.agents_view], axis=-1)\n+ agents_view = jnp.concatenate(\n+ [agent_ids, obs.agents_view],\n+ axis=-1,\n+ dtype=obs.agents_view.dtype,\n+ )\n \n return obs._replace(agents_view=agents_view) # type: ignore\n", "issue": "[BUG]: Flasbax bug on LBF (type issue)\n### Describe the bug\r\nIn the AgentID wrapper, the new_agents_view type is not enforced to be consistent, so for LBF with flashbax a dtype error emerges.\r\n\r\n### To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Run LBF with flashbax.\r\n\r\n### Expected behavior\r\nExpected the observation to be added easily to the buffer.\r\n\r\n### Context (Environment)\r\n - Updated jumanji\r\n\r\n### Additional context\r\nThis is somewhat an exercise in opening an issue!\r\n\r\n### Possible Solution\r\nWill make a PR soon! Basically a cast to the agents_view type.\r\n\n", "before_files": [{"content": "# Copyright 2022 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Tuple, Union\n\nimport chex\nimport jax.numpy as jnp\nfrom jumanji import specs\nfrom jumanji.env import Environment\nfrom jumanji.types import TimeStep\nfrom jumanji.wrappers import Wrapper\n\nfrom mava.types import Observation, ObservationGlobalState, State\n\n\nclass AgentIDWrapper(Wrapper):\n \"\"\"A wrapper to add a one-hot vector as agent IDs to the original observation.\n It can be useful in multi-agent environments where agents require unique identification.\n \"\"\"\n\n def __init__(self, env: Environment):\n super().__init__(env)\n\n def _add_agent_ids(\n self, timestep: TimeStep, num_agents: int\n ) -> Union[Observation, ObservationGlobalState]:\n \"\"\"Adds agent IDs to the observation.\"\"\"\n obs = timestep.observation\n agent_ids = jnp.eye(num_agents)\n agents_view = jnp.concatenate([agent_ids, obs.agents_view], axis=-1)\n\n return obs._replace(agents_view=agents_view) # type: ignore\n\n def reset(self, key: chex.PRNGKey) -> Tuple[State, TimeStep]:\n \"\"\"Reset the environment.\"\"\"\n state, timestep = self._env.reset(key)\n timestep.observation = self._add_agent_ids(timestep, self._env.num_agents)\n\n return state, timestep\n\n def step(\n self,\n state: State,\n action: chex.Array,\n ) -> Tuple[State, TimeStep]:\n \"\"\"Step the environment.\"\"\"\n state, timestep = self._env.step(state, action)\n timestep.observation = self._add_agent_ids(timestep, self._env.num_agents)\n\n return state, timestep\n\n def observation_spec(\n self,\n ) -> Union[specs.Spec[Observation], specs.Spec[ObservationGlobalState]]:\n \"\"\"Specification of the observation of the selected environment.\"\"\"\n obs_spec = self._env.observation_spec()\n num_obs_features = obs_spec.agents_view.shape[-1] + self._env.num_agents\n dtype = obs_spec.agents_view.dtype\n agents_view = specs.Array((self._env.num_agents, num_obs_features), dtype, \"agents_view\")\n\n return obs_spec.replace(agents_view=agents_view)\n\n\nclass GlobalStateWrapper(Wrapper):\n \"\"\"Wrapper for adding global state to an environment that follows the mava API.\n\n The wrapper includes a global environment state to be used by the centralised critic.\n Note here that since most environments do not have a global state, we create one\n by concatenating the observations of all agents.\n \"\"\"\n\n def modify_timestep(self, timestep: TimeStep) -> TimeStep[ObservationGlobalState]:\n global_state = jnp.concatenate(timestep.observation.agents_view, axis=0)\n global_state = jnp.tile(global_state, (self._env.num_agents, 1))\n\n observation = ObservationGlobalState(\n global_state=global_state,\n agents_view=timestep.observation.agents_view,\n action_mask=timestep.observation.action_mask,\n step_count=timestep.observation.step_count,\n )\n\n return timestep.replace(observation=observation)\n\n def reset(self, key: chex.PRNGKey) -> Tuple[State, TimeStep]:\n \"\"\"Reset the environment. Updates the step count.\"\"\"\n state, timestep = self._env.reset(key)\n return state, self.modify_timestep(timestep)\n\n def step(self, state: State, action: chex.Array) -> Tuple[State, TimeStep]:\n \"\"\"Step the environment. Updates the step count.\"\"\"\n state, timestep = self._env.step(state, action)\n return state, self.modify_timestep(timestep)\n\n def observation_spec(self) -> specs.Spec[ObservationGlobalState]:\n \"\"\"Specification of the observation of the selected environment.\"\"\"\n\n obs_spec = self._env.observation_spec()\n num_obs_features = obs_spec.agents_view.shape[-1]\n global_state = specs.Array(\n (self._env.num_agents, self._env.num_agents * num_obs_features),\n obs_spec.agents_view.dtype,\n \"global_state\",\n )\n\n return specs.Spec(\n ObservationGlobalState,\n \"ObservationSpec\",\n agents_view=obs_spec.agents_view,\n action_mask=obs_spec.action_mask,\n global_state=global_state,\n step_count=obs_spec.step_count,\n )\n", "path": "mava/wrappers/observation.py"}]}
2,019
163
gh_patches_debug_62030
rasdani/github-patches
git_diff
fonttools__fonttools-2472
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [feaLib] "fonttools feaLib" should error out, not continue If there's a parse/build error when using the feaLib command line tool, we currently do this: https://github.com/fonttools/fonttools/blob/445108f735b22a5ca37f669808d47906d024fe24/Lib/fontTools/feaLib/__main__.py#L69-L73 i.e. we save the font anyway and exit with status code 0. My Makefiles and I think this is a terrible idea, and I would like to change it. Any objections / thoughts? </issue> <code> [start of Lib/fontTools/feaLib/__main__.py] 1 from fontTools.ttLib import TTFont 2 from fontTools.feaLib.builder import addOpenTypeFeatures, Builder 3 from fontTools.feaLib.error import FeatureLibError 4 from fontTools import configLogger 5 from fontTools.misc.cliTools import makeOutputFileName 6 import sys 7 import argparse 8 import logging 9 10 11 log = logging.getLogger("fontTools.feaLib") 12 13 14 def main(args=None): 15 """Add features from a feature file (.fea) into a OTF font""" 16 parser = argparse.ArgumentParser( 17 description="Use fontTools to compile OpenType feature files (*.fea)." 18 ) 19 parser.add_argument( 20 "input_fea", metavar="FEATURES", help="Path to the feature file" 21 ) 22 parser.add_argument( 23 "input_font", metavar="INPUT_FONT", help="Path to the input font" 24 ) 25 parser.add_argument( 26 "-o", 27 "--output", 28 dest="output_font", 29 metavar="OUTPUT_FONT", 30 help="Path to the output font.", 31 ) 32 parser.add_argument( 33 "-t", 34 "--tables", 35 metavar="TABLE_TAG", 36 choices=Builder.supportedTables, 37 nargs="+", 38 help="Specify the table(s) to be built.", 39 ) 40 parser.add_argument( 41 "-d", 42 "--debug", 43 action="store_true", 44 help="Add source-level debugging information to font.", 45 ) 46 parser.add_argument( 47 "-v", 48 "--verbose", 49 help="increase the logger verbosity. Multiple -v " "options are allowed.", 50 action="count", 51 default=0, 52 ) 53 parser.add_argument( 54 "--traceback", help="show traceback for exceptions.", action="store_true" 55 ) 56 options = parser.parse_args(args) 57 58 levels = ["WARNING", "INFO", "DEBUG"] 59 configLogger(level=levels[min(len(levels) - 1, options.verbose)]) 60 61 output_font = options.output_font or makeOutputFileName(options.input_font) 62 log.info("Compiling features to '%s'" % (output_font)) 63 64 font = TTFont(options.input_font) 65 try: 66 addOpenTypeFeatures( 67 font, options.input_fea, tables=options.tables, debug=options.debug 68 ) 69 except FeatureLibError as e: 70 if options.traceback: 71 raise 72 log.error(e) 73 font.save(output_font) 74 75 76 if __name__ == "__main__": 77 sys.exit(main()) 78 [end of Lib/fontTools/feaLib/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Lib/fontTools/feaLib/__main__.py b/Lib/fontTools/feaLib/__main__.py --- a/Lib/fontTools/feaLib/__main__.py +++ b/Lib/fontTools/feaLib/__main__.py @@ -70,6 +70,7 @@ if options.traceback: raise log.error(e) + sys.exit(1) font.save(output_font)
{"golden_diff": "diff --git a/Lib/fontTools/feaLib/__main__.py b/Lib/fontTools/feaLib/__main__.py\n--- a/Lib/fontTools/feaLib/__main__.py\n+++ b/Lib/fontTools/feaLib/__main__.py\n@@ -70,6 +70,7 @@\n if options.traceback:\n raise\n log.error(e)\n+ sys.exit(1)\n font.save(output_font)\n", "issue": "[feaLib] \"fonttools feaLib\" should error out, not continue\nIf there's a parse/build error when using the feaLib command line tool, we currently do this:\r\n\r\nhttps://github.com/fonttools/fonttools/blob/445108f735b22a5ca37f669808d47906d024fe24/Lib/fontTools/feaLib/__main__.py#L69-L73\r\n\r\ni.e. we save the font anyway and exit with status code 0.\r\n\r\nMy Makefiles and I think this is a terrible idea, and I would like to change it. Any objections / thoughts?\r\n\r\n\n", "before_files": [{"content": "from fontTools.ttLib import TTFont\nfrom fontTools.feaLib.builder import addOpenTypeFeatures, Builder\nfrom fontTools.feaLib.error import FeatureLibError\nfrom fontTools import configLogger\nfrom fontTools.misc.cliTools import makeOutputFileName\nimport sys\nimport argparse\nimport logging\n\n\nlog = logging.getLogger(\"fontTools.feaLib\")\n\n\ndef main(args=None):\n \"\"\"Add features from a feature file (.fea) into a OTF font\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Use fontTools to compile OpenType feature files (*.fea).\"\n )\n parser.add_argument(\n \"input_fea\", metavar=\"FEATURES\", help=\"Path to the feature file\"\n )\n parser.add_argument(\n \"input_font\", metavar=\"INPUT_FONT\", help=\"Path to the input font\"\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n dest=\"output_font\",\n metavar=\"OUTPUT_FONT\",\n help=\"Path to the output font.\",\n )\n parser.add_argument(\n \"-t\",\n \"--tables\",\n metavar=\"TABLE_TAG\",\n choices=Builder.supportedTables,\n nargs=\"+\",\n help=\"Specify the table(s) to be built.\",\n )\n parser.add_argument(\n \"-d\",\n \"--debug\",\n action=\"store_true\",\n help=\"Add source-level debugging information to font.\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"increase the logger verbosity. Multiple -v \" \"options are allowed.\",\n action=\"count\",\n default=0,\n )\n parser.add_argument(\n \"--traceback\", help=\"show traceback for exceptions.\", action=\"store_true\"\n )\n options = parser.parse_args(args)\n\n levels = [\"WARNING\", \"INFO\", \"DEBUG\"]\n configLogger(level=levels[min(len(levels) - 1, options.verbose)])\n\n output_font = options.output_font or makeOutputFileName(options.input_font)\n log.info(\"Compiling features to '%s'\" % (output_font))\n\n font = TTFont(options.input_font)\n try:\n addOpenTypeFeatures(\n font, options.input_fea, tables=options.tables, debug=options.debug\n )\n except FeatureLibError as e:\n if options.traceback:\n raise\n log.error(e)\n font.save(output_font)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "Lib/fontTools/feaLib/__main__.py"}]}
1,347
96
gh_patches_debug_51
rasdani/github-patches
git_diff
magenta__magenta-1254
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pip installation fails due to librosa dependency Hi, I'm trying to install the magenta-gpu but when I did a pip install magenta-gpu: **librosa 0.6.2 has requirement joblib>=0.12, but you'll have joblib 0.11 which is incompatible.** </issue> <code> [start of magenta/version.py] 1 # Copyright 2016 Google Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 r"""Separate file for storing the current version of Magenta. 15 16 Stored in a separate file so that setup.py can reference the version without 17 pulling in all the dependencies in __init__.py. 18 """ 19 20 __version__ = '0.3.10' 21 [end of magenta/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/magenta/version.py b/magenta/version.py --- a/magenta/version.py +++ b/magenta/version.py @@ -17,4 +17,4 @@ pulling in all the dependencies in __init__.py. """ -__version__ = '0.3.10' +__version__ = '0.3.11'
{"golden_diff": "diff --git a/magenta/version.py b/magenta/version.py\n--- a/magenta/version.py\n+++ b/magenta/version.py\n@@ -17,4 +17,4 @@\n pulling in all the dependencies in __init__.py.\n \"\"\"\n \n-__version__ = '0.3.10'\n+__version__ = '0.3.11'\n", "issue": "Pip installation fails due to librosa dependency\nHi,\r\n\r\nI'm trying to install the magenta-gpu but when I did a pip install magenta-gpu:\r\n\r\n**librosa 0.6.2 has requirement joblib>=0.12, but you'll have joblib 0.11 which is incompatible.**\r\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '0.3.10'\n", "path": "magenta/version.py"}]}
828
80
gh_patches_debug_2350
rasdani/github-patches
git_diff
mirumee__ariadne-184
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update setup.py to include html and py.typed files in published package Ariadne now includes `graphql_playground.html` django template and `py.typed` file for enabling typing. We should make sure those two get published together with rest of the project. </issue> <code> [start of setup.py] 1 #! /usr/bin/env python 2 import os 3 from setuptools import setup 4 5 CLASSIFIERS = [ 6 "Development Status :: 4 - Beta", 7 "Intended Audience :: Developers", 8 "License :: OSI Approved :: BSD License", 9 "Operating System :: OS Independent", 10 "Programming Language :: Python", 11 "Programming Language :: Python :: 3.6", 12 "Programming Language :: Python :: 3.7", 13 "Topic :: Software Development :: Libraries :: Python Modules", 14 ] 15 16 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md") 17 with open(README_PATH, "r") as f: 18 README = f.read() 19 20 setup( 21 name="ariadne", 22 author="Mirumee Software", 23 author_email="[email protected]", 24 description="Ariadne is a Python library for implementing GraphQL servers.", 25 long_description=README, 26 long_description_content_type="text/markdown", 27 license="BSD", 28 version="0.4.0", 29 url="https://github.com/mirumee/ariadne", 30 packages=["ariadne"], 31 package_data={"ariadne": ["py.typed"]}, 32 install_requires=[ 33 "graphql-core-next>=1.0.4", 34 "starlette<0.13", 35 "typing_extensions>=3.6.0", 36 ], 37 classifiers=CLASSIFIERS, 38 platforms=["any"], 39 zip_safe=False, 40 ) 41 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ version="0.4.0", url="https://github.com/mirumee/ariadne", packages=["ariadne"], - package_data={"ariadne": ["py.typed"]}, + include_package_data=True, install_requires=[ "graphql-core-next>=1.0.4", "starlette<0.13",
{"golden_diff": "diff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -28,7 +28,7 @@\n version=\"0.4.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n- package_data={\"ariadne\": [\"py.typed\"]},\n+ include_package_data=True,\n install_requires=[\n \"graphql-core-next>=1.0.4\",\n \"starlette<0.13\",\n", "issue": "Update setup.py to include html and py.typed files in published package\nAriadne now includes `graphql_playground.html` django template and `py.typed` file for enabling typing. We should make sure those two get published together with rest of the project.\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.4.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n package_data={\"ariadne\": [\"py.typed\"]},\n install_requires=[\n \"graphql-core-next>=1.0.4\",\n \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n ],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]}
966
133
gh_patches_debug_36798
rasdani/github-patches
git_diff
Mailu__Mailu-3025
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [SUGG] Little verification in Mailu setup Hi thanks for Mailu it is a good project. I submit this suggestion because i made the error and take many time to find it. In step 4 of Mailu Setup for Docker compose : **Subnet of the docker network** it could be nice to verify if the last octet of the IP4 address is equal to 0 because if it is not the SMTP wont work. ![mailu](https://user-images.githubusercontent.com/11191723/83033266-302b2200-a037-11ea-9774-ffc326700811.jpg) Regards </issue> <code> [start of setup/server.py] 1 import flask 2 import flask_bootstrap 3 import redis 4 import json 5 import os 6 import jinja2 7 import uuid 8 import string 9 import random 10 import ipaddress 11 import hashlib 12 import time 13 14 15 version = os.getenv("this_version", "master") 16 static_url_path = "/" + version + "/static" 17 app = flask.Flask(__name__, static_url_path=static_url_path) 18 flask_bootstrap.Bootstrap(app) 19 db = redis.StrictRedis(host='redis', port=6379, db=0) 20 21 22 def render_flavor(flavor, template, data): 23 return flask.render_template( 24 os.path.join(flavor, template), 25 **data 26 ) 27 28 29 @app.add_template_global 30 def secret(length=16): 31 charset = string.ascii_uppercase + string.digits 32 return ''.join( 33 random.SystemRandom().choice(charset) 34 for _ in range(length) 35 ) 36 37 #Original copied from https://github.com/andrewlkho/ulagen 38 def random_ipv6_subnet(): 39 eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff 40 eui64_canon = "-".join([format(eui64, "02X")[i:i+2] for i in range(0, 18, 2)]) 41 42 h = hashlib.sha1() 43 h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8')) 44 globalid = h.hexdigest()[0:10] 45 46 prefix = ":".join(("fd" + globalid[0:2], globalid[2:6], globalid[6:10])) 47 return prefix 48 49 def build_app(path): 50 51 app.jinja_env.trim_blocks = True 52 app.jinja_env.lstrip_blocks = True 53 54 @app.context_processor 55 def app_context(): 56 return dict( 57 versions=os.getenv("VERSIONS","master").split(','), 58 stable_version = os.getenv("stable_version", "master") 59 ) 60 61 prefix_bp = flask.Blueprint(version.replace(".", "_"), __name__) 62 prefix_bp.jinja_loader = jinja2.ChoiceLoader([ 63 jinja2.FileSystemLoader(os.path.join(path, "templates")), 64 jinja2.FileSystemLoader(os.path.join(path, "flavors")) 65 ]) 66 67 root_bp = flask.Blueprint("root", __name__) 68 root_bp.jinja_loader = jinja2.ChoiceLoader([ 69 jinja2.FileSystemLoader(os.path.join(path, "templates")), 70 jinja2.FileSystemLoader(os.path.join(path, "flavors")) 71 ]) 72 73 @prefix_bp.context_processor 74 @root_bp.context_processor 75 def bp_context(version=version): 76 return dict(version=version) 77 78 @prefix_bp.route("/") 79 @root_bp.route("/") 80 def wizard(): 81 return flask.render_template( 82 'wizard.html', 83 flavor="compose", 84 steps=sorted(os.listdir(os.path.join(path, "templates", "steps", "compose"))), 85 subnet6=random_ipv6_subnet() 86 ) 87 88 @prefix_bp.route("/submit", methods=["POST"]) 89 @root_bp.route("/submit", methods=["POST"]) 90 def submit(): 91 data = flask.request.form.copy() 92 data['uid'] = str(uuid.uuid4()) 93 try: 94 data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2]) 95 except ValueError as err: 96 return "Error while generating files: " + str(err) 97 db.set(data['uid'], json.dumps(data)) 98 return flask.redirect(flask.url_for('.setup', uid=data['uid'])) 99 100 @prefix_bp.route("/setup/<uid>", methods=["GET"]) 101 @root_bp.route("/setup/<uid>", methods=["GET"]) 102 def setup(uid): 103 data = json.loads(db.get(uid)) 104 flavor = data.get("flavor", "compose") 105 rendered = render_flavor(flavor, "setup.html", data) 106 return flask.render_template("setup.html", contents=rendered) 107 108 @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"]) 109 @root_bp.route("/file/<uid>/<filepath>", methods=["GET"]) 110 def file(uid, filepath): 111 data = json.loads(db.get(uid)) 112 flavor = data.get("flavor", "compose") 113 return flask.Response( 114 render_flavor(flavor, filepath, data), 115 mimetype="application/text" 116 ) 117 118 app.register_blueprint(prefix_bp, url_prefix="/{}".format(version)) 119 app.register_blueprint(root_bp) 120 121 122 if __name__ == "__main__": 123 build_app("/tmp/mailutest") 124 app.run(debug=True) 125 [end of setup/server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup/server.py b/setup/server.py --- a/setup/server.py +++ b/setup/server.py @@ -10,12 +10,16 @@ import ipaddress import hashlib import time - +import secrets +from flask_bootstrap import StaticCDN version = os.getenv("this_version", "master") static_url_path = "/" + version + "/static" app = flask.Flask(__name__, static_url_path=static_url_path) +app.secret_key = secrets.token_hex(16) flask_bootstrap.Bootstrap(app) +# Load our jQuery. Do not use jQuery 1. +app.extensions['bootstrap']['cdns']['jquery'] = StaticCDN() db = redis.StrictRedis(host='redis', port=6379, db=0) @@ -90,12 +94,47 @@ def submit(): data = flask.request.form.copy() data['uid'] = str(uuid.uuid4()) + valid = True + try: + ipaddress.IPv4Address(data['bind4']) + except: + flask.flash('Configured IPv4 address is invalid', 'error') + valid = False + try: + ipaddress.IPv6Address(data['bind6']) + except: + flask.flash('Configured IPv6 address is invalid', 'error') + valid = False + try: + ipaddress.IPv4Network(data['subnet']) + except: + flask.flash('Configured subnet(IPv4) is invalid', 'error') + valid = False + try: + ipaddress.IPv6Network(data['subnet6']) + except: + flask.flash('Configured subnet(IPv6) is invalid', 'error') + valid = False try: data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2]) except ValueError as err: - return "Error while generating files: " + str(err) - db.set(data['uid'], json.dumps(data)) - return flask.redirect(flask.url_for('.setup', uid=data['uid'])) + flask.flash('Invalid configuration: ' + str(err)) + valid = False + if 'api_enabled' in data: + if (data['api_enabled'] == 'true'): + if data['api_token'] == '': + flask.flash('API token cannot be empty when API is enabled', 'error') + valid = False + if valid: + db.set(data['uid'], json.dumps(data)) + return flask.redirect(flask.url_for('.setup', uid=data['uid'])) + else: + return flask.render_template( + 'wizard.html', + flavor="compose", + steps=sorted(os.listdir(os.path.join(path, "templates", "steps", "compose"))), + subnet6=random_ipv6_subnet() + ) @prefix_bp.route("/setup/<uid>", methods=["GET"]) @root_bp.route("/setup/<uid>", methods=["GET"])
{"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -10,12 +10,16 @@\n import ipaddress\n import hashlib\n import time\n-\n+import secrets\n+from flask_bootstrap import StaticCDN\n \n version = os.getenv(\"this_version\", \"master\")\n static_url_path = \"/\" + version + \"/static\"\n app = flask.Flask(__name__, static_url_path=static_url_path)\n+app.secret_key = secrets.token_hex(16)\n flask_bootstrap.Bootstrap(app)\n+# Load our jQuery. Do not use jQuery 1.\n+app.extensions['bootstrap']['cdns']['jquery'] = StaticCDN()\n db = redis.StrictRedis(host='redis', port=6379, db=0)\n \n \n@@ -90,12 +94,47 @@\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n+ valid = True\n+ try:\n+ ipaddress.IPv4Address(data['bind4'])\n+ except:\n+ flask.flash('Configured IPv4 address is invalid', 'error')\n+ valid = False\n+ try:\n+ ipaddress.IPv6Address(data['bind6'])\n+ except:\n+ flask.flash('Configured IPv6 address is invalid', 'error')\n+ valid = False\n+ try:\n+ ipaddress.IPv4Network(data['subnet'])\n+ except:\n+ flask.flash('Configured subnet(IPv4) is invalid', 'error')\n+ valid = False\n+ try:\n+ ipaddress.IPv6Network(data['subnet6'])\n+ except:\n+ flask.flash('Configured subnet(IPv6) is invalid', 'error')\n+ valid = False\n try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n- return \"Error while generating files: \" + str(err)\n- db.set(data['uid'], json.dumps(data))\n- return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n+ flask.flash('Invalid configuration: ' + str(err))\n+ valid = False\n+ if 'api_enabled' in data:\n+ if (data['api_enabled'] == 'true'):\n+ if data['api_token'] == '':\n+ flask.flash('API token cannot be empty when API is enabled', 'error')\n+ valid = False\n+ if valid:\n+ db.set(data['uid'], json.dumps(data))\n+ return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n+ else:\n+ return flask.render_template(\n+ 'wizard.html',\n+ flavor=\"compose\",\n+ steps=sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", \"compose\"))),\n+ subnet6=random_ipv6_subnet()\n+ )\n \n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n", "issue": "[SUGG] Little verification in Mailu setup\nHi thanks for Mailu it is a good project.\r\nI submit this suggestion because i made the error and take many time to find it.\r\nIn step 4 of Mailu Setup for Docker compose : \r\n**Subnet of the docker network** it could be nice to verify if the last octet of the IP4 address is equal to 0 because if it is not the SMTP wont work.\r\n\r\n![mailu](https://user-images.githubusercontent.com/11191723/83033266-302b2200-a037-11ea-9774-ffc326700811.jpg)\r\n\r\nRegards \r\n\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(\n versions=os.getenv(\"VERSIONS\",\"master\").split(','),\n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n\n prefix_bp = flask.Blueprint(version.replace(\".\", \"_\"), __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template(\n 'wizard.html',\n flavor=\"compose\",\n steps=sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", \"compose\"))),\n subnet6=random_ipv6_subnet()\n )\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]}
1,987
657
gh_patches_debug_10910
rasdani/github-patches
git_diff
learningequality__kolibri-4096
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The new learners and coachs account, can't view the kolibri 0.11.0a3 at first login <!-- Instructions: * Fill out the sections below, replace …'s with information about your issue * Use the 'preview' function above this text box to verify formatting before submitting --> ### Observed behavior When I create a learners account and test to login, the page show like this "The requested URL / was not found on this server." <!-- Description of the behavior that was observed, including screenshots or other references when applicable --> ### Expected behavior Must login to kolibri page. <!-- Description of what behavior was expected but did not occur --> ### Errors and logs ``` The character encoding of the HTML document was not declared. The document will render with garbled text in some browser configurations if the document contains characters from outside the US-ASCII range. The character encoding of the page must be declared in the document or in the transfer protocol. 127.0.0.1:8080 ``` <!-- Relevant logs from: * the command line * ~/.kolibri/kolibri.log * the browser console Please wrap errors in triple backticks for clean formatting like this: ``` 01:10 info: something happened 01:12 error: something bad happened ``` --> … ### Steps to reproduce 1. login with admin account 2. go to facility and user tab 3. create new learner account 4. try to login the new learner account and see the error page. <!-- Precise steps that someone else can follow in order to see this behavior --> … ### Context Windows 7 and 10 kolibri 0.11.0a3 firefox 61.0.1(64-bit) <!-- Tell us about your environment, including: * Kolibri version * Operating system * Browser --> ### Screenshot ![errorloginpage](https://user-images.githubusercontent.com/38421180/42367122-fb1ae096-80b7-11e8-9165-6e73b2702a72.png) </issue> <code> [start of kolibri/core/views.py] 1 from django import http 2 from django.conf import settings 3 from django.contrib.auth import logout 4 from django.core.urlresolvers import translate_url 5 from django.http import Http404 6 from django.http import HttpResponseRedirect 7 from django.utils.http import is_safe_url 8 from django.utils.translation import check_for_language 9 from django.utils.translation import LANGUAGE_SESSION_KEY 10 from django.utils.translation import ugettext_lazy as _ 11 from django.views.generic.base import View 12 from django.views.i18n import LANGUAGE_QUERY_PARAMETER 13 14 from kolibri.core.auth.constants import user_kinds 15 from kolibri.core.auth.models import Role 16 from kolibri.core.decorators import signin_redirect_exempt 17 from kolibri.core.hooks import RoleBasedRedirectHook 18 19 20 # Modified from django.views.i18n 21 @signin_redirect_exempt 22 def set_language(request): 23 """ 24 Redirect to a given url while setting the chosen language in the 25 session or cookie. The url and the language code need to be 26 specified in the request parameters. 27 Since this view changes how the user will see the rest of the site, it must 28 only be accessed as a POST request. If called as a GET request, it will 29 redirect to the page in the request (the 'next' parameter) without changing 30 any state. 31 """ 32 next = request.POST.get('next', request.GET.get('next')) 33 if not is_safe_url(url=next, host=request.get_host()): 34 next = request.META.get('HTTP_REFERER') 35 if not is_safe_url(url=next, host=request.get_host()): 36 next = '/' 37 response = http.HttpResponseRedirect(next) 38 if request.method == 'POST': 39 lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER) 40 if lang_code and check_for_language(lang_code): 41 next_trans = translate_url(next, lang_code) 42 if next_trans != next: 43 response = http.HttpResponseRedirect(next_trans) 44 if hasattr(request, 'session'): 45 request.session[LANGUAGE_SESSION_KEY] = lang_code 46 # Always set cookie 47 response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code, 48 max_age=settings.LANGUAGE_COOKIE_AGE, 49 path=settings.LANGUAGE_COOKIE_PATH, 50 domain=settings.LANGUAGE_COOKIE_DOMAIN) 51 return response 52 53 54 def logout_view(request): 55 logout(request) 56 return http.HttpResponseRedirect('/') 57 58 59 def get_url_by_role(role, first_login): 60 obj = next((hook for hook in RoleBasedRedirectHook().registered_hooks 61 if hook.role == role and hook.first_login == first_login), None) 62 if obj: 63 return obj.url 64 65 66 class GuestRedirectView(View): 67 def get(self, request): 68 """ 69 Redirects a guest user to a learner accessible page. 70 """ 71 return HttpResponseRedirect(get_url_by_role(user_kinds.LEARNER, False)) 72 73 74 class RootURLRedirectView(View): 75 76 def get(self, request): 77 """ 78 Redirects user based on the highest role they have for which a redirect is defined. 79 """ 80 first_login = request.session.get("first_login", False) 81 if request.user.is_authenticated(): 82 url = None 83 if request.user.is_superuser: 84 url = url or get_url_by_role(user_kinds.SUPERUSER, first_login) 85 roles = set(Role.objects.filter(user_id=request.user.id).values_list('kind', flat=True).distinct()) 86 if user_kinds.ADMIN in roles: 87 url = url or get_url_by_role(user_kinds.ADMIN, first_login) 88 if user_kinds.COACH in roles: 89 url = url or get_url_by_role(user_kinds.COACH, first_login) 90 url = url or get_url_by_role(user_kinds.LEARNER, first_login) 91 else: 92 url = get_url_by_role(user_kinds.ANONYMOUS, first_login) 93 if url: 94 return HttpResponseRedirect(url) 95 raise Http404(_("No appropriate redirect pages found, it is likely that Kolibri is badly configured")) 96 [end of kolibri/core/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/core/views.py b/kolibri/core/views.py --- a/kolibri/core/views.py +++ b/kolibri/core/views.py @@ -59,6 +59,13 @@ def get_url_by_role(role, first_login): obj = next((hook for hook in RoleBasedRedirectHook().registered_hooks if hook.role == role and hook.first_login == first_login), None) + + if obj is None and first_login: + # If it is the first_login, do a fallback to find the non-first login behaviour when it is + # not available + obj = next((hook for hook in RoleBasedRedirectHook().registered_hooks + if hook.role == role and hook.first_login is False), None) + if obj: return obj.url
{"golden_diff": "diff --git a/kolibri/core/views.py b/kolibri/core/views.py\n--- a/kolibri/core/views.py\n+++ b/kolibri/core/views.py\n@@ -59,6 +59,13 @@\n def get_url_by_role(role, first_login):\n obj = next((hook for hook in RoleBasedRedirectHook().registered_hooks\n if hook.role == role and hook.first_login == first_login), None)\n+\n+ if obj is None and first_login:\n+ # If it is the first_login, do a fallback to find the non-first login behaviour when it is\n+ # not available\n+ obj = next((hook for hook in RoleBasedRedirectHook().registered_hooks\n+ if hook.role == role and hook.first_login is False), None)\n+\n if obj:\n return obj.url\n", "issue": "The new learners and coachs account, can't view the kolibri 0.11.0a3 at first login\n<!--\r\nInstructions:\r\n * Fill out the sections below, replace \u2026's with information about your issue\r\n * Use the 'preview' function above this text box to verify formatting before submitting\r\n-->\r\n\r\n### Observed behavior\r\nWhen I create a learners account and test to login, the page show like this \"The requested URL / was not found on this server.\"\r\n\r\n<!--\r\nDescription of the behavior that was observed, including screenshots or other references when applicable\r\n-->\r\n\r\n\r\n\r\n### Expected behavior\r\nMust login to kolibri page.\r\n<!--\r\nDescription of what behavior was expected but did not occur\r\n-->\r\n\r\n\r\n### Errors and logs\r\n```\r\nThe character encoding of the HTML document was not declared. The document will render with garbled text in some browser configurations if the document contains characters from outside the US-ASCII range. The character encoding of the page must be declared in the document or in the transfer protocol. 127.0.0.1:8080 \r\n```\r\n<!--\r\nRelevant logs from:\r\n * the command line\r\n * ~/.kolibri/kolibri.log\r\n * the browser console\r\n\r\nPlease wrap errors in triple backticks for clean formatting like this:\r\n```\r\n01:10 info: something happened\r\n01:12 error: something bad happened\r\n```\r\n-->\r\n\r\n\u2026\r\n\r\n### Steps to reproduce\r\n1. login with admin account\r\n2. go to facility and user tab\r\n3. create new learner account\r\n4. try to login the new learner account and see the error page.\r\n<!--\r\nPrecise steps that someone else can follow in order to see this behavior\r\n-->\r\n\r\n\u2026\r\n\r\n### Context\r\nWindows 7 and 10\r\nkolibri 0.11.0a3\r\nfirefox 61.0.1(64-bit)\r\n<!--\r\nTell us about your environment, including:\r\n * Kolibri version\r\n * Operating system\r\n * Browser\r\n-->\r\n### Screenshot\r\n![errorloginpage](https://user-images.githubusercontent.com/38421180/42367122-fb1ae096-80b7-11e8-9165-6e73b2702a72.png)\r\n\n", "before_files": [{"content": "from django import http\nfrom django.conf import settings\nfrom django.contrib.auth import logout\nfrom django.core.urlresolvers import translate_url\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\nfrom django.utils.http import is_safe_url\nfrom django.utils.translation import check_for_language\nfrom django.utils.translation import LANGUAGE_SESSION_KEY\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic.base import View\nfrom django.views.i18n import LANGUAGE_QUERY_PARAMETER\n\nfrom kolibri.core.auth.constants import user_kinds\nfrom kolibri.core.auth.models import Role\nfrom kolibri.core.decorators import signin_redirect_exempt\nfrom kolibri.core.hooks import RoleBasedRedirectHook\n\n\n# Modified from django.views.i18n\n@signin_redirect_exempt\ndef set_language(request):\n \"\"\"\n Redirect to a given url while setting the chosen language in the\n session or cookie. The url and the language code need to be\n specified in the request parameters.\n Since this view changes how the user will see the rest of the site, it must\n only be accessed as a POST request. If called as a GET request, it will\n redirect to the page in the request (the 'next' parameter) without changing\n any state.\n \"\"\"\n next = request.POST.get('next', request.GET.get('next'))\n if not is_safe_url(url=next, host=request.get_host()):\n next = request.META.get('HTTP_REFERER')\n if not is_safe_url(url=next, host=request.get_host()):\n next = '/'\n response = http.HttpResponseRedirect(next)\n if request.method == 'POST':\n lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)\n if lang_code and check_for_language(lang_code):\n next_trans = translate_url(next, lang_code)\n if next_trans != next:\n response = http.HttpResponseRedirect(next_trans)\n if hasattr(request, 'session'):\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n # Always set cookie\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN)\n return response\n\n\ndef logout_view(request):\n logout(request)\n return http.HttpResponseRedirect('/')\n\n\ndef get_url_by_role(role, first_login):\n obj = next((hook for hook in RoleBasedRedirectHook().registered_hooks\n if hook.role == role and hook.first_login == first_login), None)\n if obj:\n return obj.url\n\n\nclass GuestRedirectView(View):\n def get(self, request):\n \"\"\"\n Redirects a guest user to a learner accessible page.\n \"\"\"\n return HttpResponseRedirect(get_url_by_role(user_kinds.LEARNER, False))\n\n\nclass RootURLRedirectView(View):\n\n def get(self, request):\n \"\"\"\n Redirects user based on the highest role they have for which a redirect is defined.\n \"\"\"\n first_login = request.session.get(\"first_login\", False)\n if request.user.is_authenticated():\n url = None\n if request.user.is_superuser:\n url = url or get_url_by_role(user_kinds.SUPERUSER, first_login)\n roles = set(Role.objects.filter(user_id=request.user.id).values_list('kind', flat=True).distinct())\n if user_kinds.ADMIN in roles:\n url = url or get_url_by_role(user_kinds.ADMIN, first_login)\n if user_kinds.COACH in roles:\n url = url or get_url_by_role(user_kinds.COACH, first_login)\n url = url or get_url_by_role(user_kinds.LEARNER, first_login)\n else:\n url = get_url_by_role(user_kinds.ANONYMOUS, first_login)\n if url:\n return HttpResponseRedirect(url)\n raise Http404(_(\"No appropriate redirect pages found, it is likely that Kolibri is badly configured\"))\n", "path": "kolibri/core/views.py"}]}
2,030
178
gh_patches_debug_35237
rasdani/github-patches
git_diff
scikit-hep__pyhf-1435
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Correlated background simplemodel something like ```python def simplemodel2(s,b_up,b_nom,b_dn): spec = { 'channels': [ { 'name': 'singlechannel', 'samples': [ { 'name': 'signal', 'data': s, 'modifiers': [{'name': 'mu', 'type': 'normfactor', 'data': None}] }, {'name': 'background', 'data': b_nom, 'modifiers': [ { 'name': 'uncorr_bkguncrt', 'type': 'histosys', 'data': { 'hi_data': b_up, 'lo_data': b_dn } } ] } ] } ] } return pyhf.Model(spec) ``` with an API like `pyhf.simplemodels.correlated_bkg` </issue> <code> [start of src/pyhf/simplemodels.py] 1 from . import Model 2 3 __all__ = ["hepdata_like"] 4 5 6 def __dir__(): 7 return __all__ 8 9 10 def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None): 11 """ 12 Construct a simple single channel :class:`~pyhf.pdf.Model` with a 13 :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated 14 background uncertainty. 15 16 Example: 17 >>> import pyhf 18 >>> pyhf.set_backend("numpy") 19 >>> model = pyhf.simplemodels.hepdata_like( 20 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0] 21 ... ) 22 >>> model.schema 23 'model.json' 24 >>> model.config.channels 25 ['singlechannel'] 26 >>> model.config.samples 27 ['background', 'signal'] 28 >>> model.config.parameters 29 ['mu', 'uncorr_bkguncrt'] 30 >>> model.expected_data(model.config.suggested_init()) 31 array([ 62. , 63. , 277.77777778, 55.18367347]) 32 33 Args: 34 signal_data (:obj:`list`): The data in the signal sample 35 bkg_data (:obj:`list`): The data in the background sample 36 bkg_uncerts (:obj:`list`): The statistical uncertainty on the background sample counts 37 batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute 38 39 Returns: 40 ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema 41 42 """ 43 spec = { 44 'channels': [ 45 { 46 'name': 'singlechannel', 47 'samples': [ 48 { 49 'name': 'signal', 50 'data': signal_data, 51 'modifiers': [ 52 {'name': 'mu', 'type': 'normfactor', 'data': None} 53 ], 54 }, 55 { 56 'name': 'background', 57 'data': bkg_data, 58 'modifiers': [ 59 { 60 'name': 'uncorr_bkguncrt', 61 'type': 'shapesys', 62 'data': bkg_uncerts, 63 } 64 ], 65 }, 66 ], 67 } 68 ] 69 } 70 return Model(spec, batch_size=batch_size) 71 [end of src/pyhf/simplemodels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py --- a/src/pyhf/simplemodels.py +++ b/src/pyhf/simplemodels.py @@ -1,12 +1,81 @@ from . import Model -__all__ = ["hepdata_like"] +__all__ = ["correlated_background", "hepdata_like"] def __dir__(): return __all__ +def correlated_background(signal, bkg, bkg_up, bkg_down, batch_size=None): + r""" + Construct a simple single channel :class:`~pyhf.pdf.Model` with a + :class:`~pyhf.modifiers.histosys` modifier representing a background + with a fully correlated bin-by-bin uncertainty. + + Args: + signal (:obj:`list`): The data in the signal sample. + bkg (:obj:`list`): The data in the background sample. + bkg_up (:obj:`list`): The background sample under an upward variation + corresponding to :math:`\alpha=+1`. + bkg_down (:obj:`list`): The background sample under a downward variation + corresponding to :math:`\alpha=-1`. + batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute. + + Returns: + ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema. + + Example: + >>> import pyhf + >>> pyhf.set_backend("numpy") + >>> model = pyhf.simplemodels.correlated_background( + ... signal=[12.0, 11.0], + ... bkg=[50.0, 52.0], + ... bkg_up=[45.0, 57.0], + ... bkg_down=[55.0, 47.0], + ... ) + >>> model.schema + 'model.json' + >>> model.config.channels + ['single_channel'] + >>> model.config.samples + ['background', 'signal'] + >>> model.config.parameters + ['correlated_bkg_uncertainty', 'mu'] + >>> model.expected_data(model.config.suggested_init()) + array([62., 63., 0.]) + + """ + spec = { + "channels": [ + { + "name": "single_channel", + "samples": [ + { + "name": "signal", + "data": signal, + "modifiers": [ + {"name": "mu", "type": "normfactor", "data": None} + ], + }, + { + "name": "background", + "data": bkg, + "modifiers": [ + { + "name": "correlated_bkg_uncertainty", + "type": "histosys", + "data": {"hi_data": bkg_up, "lo_data": bkg_down}, + } + ], + }, + ], + } + ] + } + return Model(spec, batch_size=batch_size) + + def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None): """ Construct a simple single channel :class:`~pyhf.pdf.Model` with a
{"golden_diff": "diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py\n--- a/src/pyhf/simplemodels.py\n+++ b/src/pyhf/simplemodels.py\n@@ -1,12 +1,81 @@\n from . import Model\n \n-__all__ = [\"hepdata_like\"]\n+__all__ = [\"correlated_background\", \"hepdata_like\"]\n \n \n def __dir__():\n return __all__\n \n \n+def correlated_background(signal, bkg, bkg_up, bkg_down, batch_size=None):\n+ r\"\"\"\n+ Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n+ :class:`~pyhf.modifiers.histosys` modifier representing a background\n+ with a fully correlated bin-by-bin uncertainty.\n+\n+ Args:\n+ signal (:obj:`list`): The data in the signal sample.\n+ bkg (:obj:`list`): The data in the background sample.\n+ bkg_up (:obj:`list`): The background sample under an upward variation\n+ corresponding to :math:`\\alpha=+1`.\n+ bkg_down (:obj:`list`): The background sample under a downward variation\n+ corresponding to :math:`\\alpha=-1`.\n+ batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute.\n+\n+ Returns:\n+ ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema.\n+\n+ Example:\n+ >>> import pyhf\n+ >>> pyhf.set_backend(\"numpy\")\n+ >>> model = pyhf.simplemodels.correlated_background(\n+ ... signal=[12.0, 11.0],\n+ ... bkg=[50.0, 52.0],\n+ ... bkg_up=[45.0, 57.0],\n+ ... bkg_down=[55.0, 47.0],\n+ ... )\n+ >>> model.schema\n+ 'model.json'\n+ >>> model.config.channels\n+ ['single_channel']\n+ >>> model.config.samples\n+ ['background', 'signal']\n+ >>> model.config.parameters\n+ ['correlated_bkg_uncertainty', 'mu']\n+ >>> model.expected_data(model.config.suggested_init())\n+ array([62., 63., 0.])\n+\n+ \"\"\"\n+ spec = {\n+ \"channels\": [\n+ {\n+ \"name\": \"single_channel\",\n+ \"samples\": [\n+ {\n+ \"name\": \"signal\",\n+ \"data\": signal,\n+ \"modifiers\": [\n+ {\"name\": \"mu\", \"type\": \"normfactor\", \"data\": None}\n+ ],\n+ },\n+ {\n+ \"name\": \"background\",\n+ \"data\": bkg,\n+ \"modifiers\": [\n+ {\n+ \"name\": \"correlated_bkg_uncertainty\",\n+ \"type\": \"histosys\",\n+ \"data\": {\"hi_data\": bkg_up, \"lo_data\": bkg_down},\n+ }\n+ ],\n+ },\n+ ],\n+ }\n+ ]\n+ }\n+ return Model(spec, batch_size=batch_size)\n+\n+\n def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n \"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n", "issue": "Correlated background simplemodel\nsomething like\r\n\r\n```python\r\ndef simplemodel2(s,b_up,b_nom,b_dn):\r\n spec = {\r\n 'channels': [\r\n {\r\n 'name': 'singlechannel',\r\n 'samples': [\r\n {\r\n 'name': 'signal',\r\n 'data': s,\r\n 'modifiers': [{'name': 'mu', 'type': 'normfactor', 'data': None}]\r\n },\r\n {'name': 'background',\r\n 'data': b_nom,\r\n 'modifiers': [\r\n {\r\n 'name': 'uncorr_bkguncrt',\r\n 'type': 'histosys',\r\n 'data': {\r\n 'hi_data': b_up,\r\n 'lo_data': b_dn\r\n }\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n return pyhf.Model(spec)\r\n\r\n```\r\n\r\nwith an API like `pyhf.simplemodels.correlated_bkg`\n", "before_files": [{"content": "from . import Model\n\n__all__ = [\"hepdata_like\"]\n\n\ndef __dir__():\n return __all__\n\n\ndef hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n \"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated\n background uncertainty.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['singlechannel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['mu', 'uncorr_bkguncrt']\n >>> model.expected_data(model.config.suggested_init())\n array([ 62. , 63. , 277.77777778, 55.18367347])\n\n Args:\n signal_data (:obj:`list`): The data in the signal sample\n bkg_data (:obj:`list`): The data in the background sample\n bkg_uncerts (:obj:`list`): The statistical uncertainty on the background sample counts\n batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema\n\n \"\"\"\n spec = {\n 'channels': [\n {\n 'name': 'singlechannel',\n 'samples': [\n {\n 'name': 'signal',\n 'data': signal_data,\n 'modifiers': [\n {'name': 'mu', 'type': 'normfactor', 'data': None}\n ],\n },\n {\n 'name': 'background',\n 'data': bkg_data,\n 'modifiers': [\n {\n 'name': 'uncorr_bkguncrt',\n 'type': 'shapesys',\n 'data': bkg_uncerts,\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n", "path": "src/pyhf/simplemodels.py"}]}
1,395
747
gh_patches_debug_432
rasdani/github-patches
git_diff
HybirdCorp__creme_crm-431
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [WIP] fix python3.9.12 3.9.13 ci issue </issue> <code> [start of creme/__init__.py] 1 __version__ = '2.4-alpha1' 2 3 # App registry hooking --------------------------------------------------------- 4 5 try: 6 from django.apps.config import AppConfig 7 from django.apps.registry import Apps 8 except ImportError: 9 # This error may appear with old versions of setuptools during installation 10 import sys 11 12 sys.stderr.write( 13 'Django is not installed ; ' 14 'ignore this message if you are installing Creme.' 15 ) 16 else: 17 AppConfig.all_apps_ready = lambda self: None 18 19 _original_populate = Apps.populate 20 21 def _hooked_populate(self, installed_apps=None): 22 if self.ready: 23 return 24 25 if getattr(self, '_all_apps_ready', False): 26 return 27 28 _original_populate(self, installed_apps) 29 30 with self._lock: 31 if getattr(self, '_all_apps_ready', False): 32 return 33 34 for app_config in self.get_app_configs(): 35 app_config.all_apps_ready() 36 37 self._all_apps_ready = True 38 39 Apps.populate = _hooked_populate 40 [end of creme/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/creme/__init__.py b/creme/__init__.py --- a/creme/__init__.py +++ b/creme/__init__.py @@ -1,5 +1,10 @@ __version__ = '2.4-alpha1' + +def get_version(): + return __version__ + + # App registry hooking --------------------------------------------------------- try:
{"golden_diff": "diff --git a/creme/__init__.py b/creme/__init__.py\n--- a/creme/__init__.py\n+++ b/creme/__init__.py\n@@ -1,5 +1,10 @@\n __version__ = '2.4-alpha1'\n \n+\n+def get_version():\n+ return __version__\n+\n+\n # App registry hooking ---------------------------------------------------------\n \n try:\n", "issue": "[WIP] fix python3.9.12 3.9.13 ci issue\n\n", "before_files": [{"content": "__version__ = '2.4-alpha1'\n\n# App registry hooking ---------------------------------------------------------\n\ntry:\n from django.apps.config import AppConfig\n from django.apps.registry import Apps\nexcept ImportError:\n # This error may appear with old versions of setuptools during installation\n import sys\n\n sys.stderr.write(\n 'Django is not installed ; '\n 'ignore this message if you are installing Creme.'\n )\nelse:\n AppConfig.all_apps_ready = lambda self: None\n\n _original_populate = Apps.populate\n\n def _hooked_populate(self, installed_apps=None):\n if self.ready:\n return\n\n if getattr(self, '_all_apps_ready', False):\n return\n\n _original_populate(self, installed_apps)\n\n with self._lock:\n if getattr(self, '_all_apps_ready', False):\n return\n\n for app_config in self.get_app_configs():\n app_config.all_apps_ready()\n\n self._all_apps_ready = True\n\n Apps.populate = _hooked_populate\n", "path": "creme/__init__.py"}]}
851
88
gh_patches_debug_4679
rasdani/github-patches
git_diff
pyg-team__pytorch_geometric-8343
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Problem with torch_geometric.transforms ### 🐛 Describe the bug --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) [<ipython-input-20-2b41d296395c>](https://localhost:8080/#) in <cell line: 7>() 5 import torch.nn as nn 6 import torch.nn.functional as F ----> 7 import torch_geometric.transforms as T 8 from tqdm.auto import tqdm 9 3 frames [/usr/local/lib/python3.10/dist-packages/torch_geometric/__init__.py](https://localhost:8080/#) in <module> ----> 1 import torch_geometric.utils 2 import torch_geometric.data 3 import torch_geometric.sampler 4 import torch_geometric.loader 5 import torch_geometric.transforms [/usr/local/lib/python3.10/dist-packages/torch_geometric/utils/__init__.py](https://localhost:8080/#) in <module> 1 import copy 2 ----> 3 from .scatter import scatter, group_argsort 4 from .segment import segment 5 from .sort import index_sort [/usr/local/lib/python3.10/dist-packages/torch_geometric/utils/scatter.py](https://localhost:8080/#) in <module> 5 6 import torch_geometric.typing ----> 7 from torch_geometric import warnings 8 from torch_geometric.typing import torch_scatter 9 from torch_geometric.utils.functions import cumsum [/usr/local/lib/python3.10/dist-packages/torch_geometric/warnings.py](https://localhost:8080/#) in <module> 3 import torch_geometric 4 ----> 5 if torch_geometric.typing.WITH_PT20: # pragma: no cover 6 from torch._dynamo import is_compiling as _is_compiling 7 else: AttributeError: partially initialized module 'torch_geometric' has no attribute 'typing' (most likely due to a circular import) ### Environment * PyG version: 2.4.0 * PyTorch version: 2.1.0+cu118 * OS: Windows * Python version: 3.10.12 * CUDA/cuDNN version: Cuda compilation tools, release 11.8, V11.8.89 Build cuda_11.8.r11.8/compiler.31833905_0 cuDNN version: 8900 * How you installed PyTorch and PyG (`conda`, `pip`, source): pip * Any other relevant information (*e.g.*, version of `torch-scatter`): </issue> <code> [start of torch_geometric/warnings.py] 1 import warnings 2 3 import torch_geometric 4 5 if torch_geometric.typing.WITH_PT20: # pragma: no cover 6 from torch._dynamo import is_compiling as _is_compiling 7 else: 8 9 def _is_compiling() -> bool: # pragma: no cover 10 return False 11 12 13 def warn(message: str): 14 if _is_compiling(): 15 return 16 17 warnings.warn(message) 18 19 20 def filterwarnings(action: str, message: str): 21 if _is_compiling(): 22 return 23 24 warnings.filterwarnings(action, message) 25 [end of torch_geometric/warnings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torch_geometric/warnings.py b/torch_geometric/warnings.py --- a/torch_geometric/warnings.py +++ b/torch_geometric/warnings.py @@ -1,13 +1,14 @@ import warnings +import torch + import torch_geometric -if torch_geometric.typing.WITH_PT20: # pragma: no cover - from torch._dynamo import is_compiling as _is_compiling -else: - def _is_compiling() -> bool: # pragma: no cover - return False +def _is_compiling() -> bool: # pragma: no cover + if torch_geometric.typing.WITH_PT21: + return torch._dynamo.is_compiling() + return False def warn(message: str):
{"golden_diff": "diff --git a/torch_geometric/warnings.py b/torch_geometric/warnings.py\n--- a/torch_geometric/warnings.py\n+++ b/torch_geometric/warnings.py\n@@ -1,13 +1,14 @@\n import warnings\n \n+import torch\n+\n import torch_geometric\n \n-if torch_geometric.typing.WITH_PT20: # pragma: no cover\n- from torch._dynamo import is_compiling as _is_compiling\n-else:\n \n- def _is_compiling() -> bool: # pragma: no cover\n- return False\n+def _is_compiling() -> bool: # pragma: no cover\n+ if torch_geometric.typing.WITH_PT21:\n+ return torch._dynamo.is_compiling()\n+ return False\n \n \n def warn(message: str):\n", "issue": "Problem with torch_geometric.transforms\n### \ud83d\udc1b Describe the bug\n\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n[<ipython-input-20-2b41d296395c>](https://localhost:8080/#) in <cell line: 7>()\r\n 5 import torch.nn as nn\r\n 6 import torch.nn.functional as F\r\n----> 7 import torch_geometric.transforms as T\r\n 8 from tqdm.auto import tqdm\r\n 9 \r\n\r\n3 frames\r\n[/usr/local/lib/python3.10/dist-packages/torch_geometric/__init__.py](https://localhost:8080/#) in <module>\r\n----> 1 import torch_geometric.utils\r\n 2 import torch_geometric.data\r\n 3 import torch_geometric.sampler\r\n 4 import torch_geometric.loader\r\n 5 import torch_geometric.transforms\r\n\r\n[/usr/local/lib/python3.10/dist-packages/torch_geometric/utils/__init__.py](https://localhost:8080/#) in <module>\r\n 1 import copy\r\n 2 \r\n----> 3 from .scatter import scatter, group_argsort\r\n 4 from .segment import segment\r\n 5 from .sort import index_sort\r\n\r\n[/usr/local/lib/python3.10/dist-packages/torch_geometric/utils/scatter.py](https://localhost:8080/#) in <module>\r\n 5 \r\n 6 import torch_geometric.typing\r\n----> 7 from torch_geometric import warnings\r\n 8 from torch_geometric.typing import torch_scatter\r\n 9 from torch_geometric.utils.functions import cumsum\r\n\r\n[/usr/local/lib/python3.10/dist-packages/torch_geometric/warnings.py](https://localhost:8080/#) in <module>\r\n 3 import torch_geometric\r\n 4 \r\n----> 5 if torch_geometric.typing.WITH_PT20: # pragma: no cover\r\n 6 from torch._dynamo import is_compiling as _is_compiling\r\n 7 else:\r\n\r\nAttributeError: partially initialized module 'torch_geometric' has no attribute 'typing' (most likely due to a circular import)\n\n### Environment\n\n* PyG version: 2.4.0\r\n* PyTorch version: 2.1.0+cu118\r\n* OS: Windows\r\n* Python version: 3.10.12\r\n* CUDA/cuDNN version:\r\nCuda compilation tools, release 11.8, V11.8.89\r\nBuild cuda_11.8.r11.8/compiler.31833905_0\r\ncuDNN version: 8900\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source): pip\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "import warnings\n\nimport torch_geometric\n\nif torch_geometric.typing.WITH_PT20: # pragma: no cover\n from torch._dynamo import is_compiling as _is_compiling\nelse:\n\n def _is_compiling() -> bool: # pragma: no cover\n return False\n\n\ndef warn(message: str):\n if _is_compiling():\n return\n\n warnings.warn(message)\n\n\ndef filterwarnings(action: str, message: str):\n if _is_compiling():\n return\n\n warnings.filterwarnings(action, message)\n", "path": "torch_geometric/warnings.py"}]}
1,339
183
gh_patches_debug_47839
rasdani/github-patches
git_diff
holoviz__panel-3157
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add files to `__init__` for autocomplete in VS Code When writing in vs code the autocomplete only works for modules imported in an `__init__.py` file, e.g. `pn.widgets.IntSlider` work but `pn.viewable.Viewer` does not. See here: https://user-images.githubusercontent.com/19758978/150685703-a235b219-6052-4e6e-b1f5-b121dc1f1558.mp4 The solution is pretty easy as `.viewable` only needs to be added to the `__init__`. https://user-images.githubusercontent.com/19758978/150685758-3b1e5468-bcbe-4337-a62a-f3a4da8d9caf.mp4 I don't know if #3132 will fix this. When you have time @MarcSkovMadsen can you check this? </issue> <code> [start of panel/__init__.py] 1 from . import layout # noqa 2 from . import links # noqa 3 from . import pane # noqa 4 from . import param # noqa 5 from . import pipeline # noqa 6 from . import widgets # noqa 7 8 from .config import config, panel_extension as extension, __version__ # noqa 9 from .depends import bind, depends # noqa 10 from .interact import interact # noqa 11 from .io import _jupyter_server_extension_paths, ipywidget, serve, state # noqa 12 from .layout import ( # noqa 13 Accordion, Card, Column, GridSpec, GridBox, FlexBox, Tabs, Row, 14 Spacer, WidgetBox 15 ) 16 from .pane import panel, Pane # noqa 17 from .param import Param # noqa 18 from .template import Template # noqa 19 from .widgets import indicators # noqa 20 [end of panel/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/panel/__init__.py b/panel/__init__.py --- a/panel/__init__.py +++ b/panel/__init__.py @@ -3,6 +3,8 @@ from . import pane # noqa from . import param # noqa from . import pipeline # noqa +from . import reactive # noqa +from . import viewable # noqa from . import widgets # noqa from .config import config, panel_extension as extension, __version__ # noqa
{"golden_diff": "diff --git a/panel/__init__.py b/panel/__init__.py\n--- a/panel/__init__.py\n+++ b/panel/__init__.py\n@@ -3,6 +3,8 @@\n from . import pane # noqa\n from . import param # noqa\n from . import pipeline # noqa\n+from . import reactive # noqa\n+from . import viewable # noqa\n from . import widgets # noqa\n \n from .config import config, panel_extension as extension, __version__ # noqa\n", "issue": "Add files to `__init__` for autocomplete in VS Code\nWhen writing in vs code the autocomplete only works for modules imported in an `__init__.py` file, e.g. `pn.widgets.IntSlider` work but `pn.viewable.Viewer` does not. See here:\r\n\r\nhttps://user-images.githubusercontent.com/19758978/150685703-a235b219-6052-4e6e-b1f5-b121dc1f1558.mp4\r\n\r\nThe solution is pretty easy as `.viewable` only needs to be added to the `__init__`.\r\n\r\nhttps://user-images.githubusercontent.com/19758978/150685758-3b1e5468-bcbe-4337-a62a-f3a4da8d9caf.mp4\r\n\r\nI don't know if #3132 will fix this. When you have time @MarcSkovMadsen can you check this? \n", "before_files": [{"content": "from . import layout # noqa\nfrom . import links # noqa\nfrom . import pane # noqa\nfrom . import param # noqa\nfrom . import pipeline # noqa\nfrom . import widgets # noqa\n\nfrom .config import config, panel_extension as extension, __version__ # noqa\nfrom .depends import bind, depends # noqa\nfrom .interact import interact # noqa\nfrom .io import _jupyter_server_extension_paths, ipywidget, serve, state # noqa\nfrom .layout import ( # noqa\n Accordion, Card, Column, GridSpec, GridBox, FlexBox, Tabs, Row,\n Spacer, WidgetBox\n)\nfrom .pane import panel, Pane # noqa\nfrom .param import Param # noqa\nfrom .template import Template # noqa\nfrom .widgets import indicators # noqa\n", "path": "panel/__init__.py"}]}
971
109
gh_patches_debug_15842
rasdani/github-patches
git_diff
falconry__falcon-174
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update to_query_str to intelligently handle lists </issue> <code> [start of falcon/util.py] 1 """Defines Falcon utility functions 2 3 Copyright 2013 by Rackspace Hosting, Inc. 4 5 Licensed under the Apache License, Version 2.0 (the "License"); 6 you may not use this file except in compliance with the License. 7 You may obtain a copy of the License at 8 9 http://www.apache.org/licenses/LICENSE-2.0 10 11 Unless required by applicable law or agreed to in writing, software 12 distributed under the License is distributed on an "AS IS" BASIS, 13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 See the License for the specific language governing permissions and 15 limitations under the License. 16 17 """ 18 19 import datetime 20 import six 21 22 if six.PY3: # pragma nocover 23 from urllib.parse import quote as url_quote 24 else: # pragma nocover 25 from urllib import quote as url_quote 26 27 28 __all__ = ('dt_to_http', 'http_date_to_dt', 'to_query_str', 'percent_escape') 29 30 31 def dt_to_http(dt): 32 """Converts a datetime instance to an HTTP date string. 33 34 Args: 35 dt: A datetime object, assumed to be UTC 36 37 Returns: 38 An HTTP date string, e.g., "Tue, 15 Nov 1994 12:45:26 GMT". See 39 also: http://goo.gl/R7So4 40 """ 41 42 # Tue, 15 Nov 1994 12:45:26 GMT 43 return dt.strftime('%a, %d %b %Y %H:%M:%S GMT') 44 45 46 def http_date_to_dt(http_date): 47 """Converts an HTTP date string to a datetime instance. 48 49 Args: 50 http_date: An HTTP date string, e.g., "Tue, 15 Nov 1994 12:45:26 GMT". 51 52 Returns: 53 A UTC datetime instance corresponding to the given HTTP date. 54 """ 55 56 return datetime.datetime.strptime( 57 http_date, '%a, %d %b %Y %H:%M:%S %Z') 58 59 60 def to_query_str(params): 61 """Converts a dict of params to afaln actual query string. 62 63 Args: 64 params: dict of simple key-value types, where key is a string and 65 value is a string or something that can be converted into a 66 string. 67 68 Returns: 69 A URI query string starting with '?', or and empty string if there 70 are no params (the dict is empty). 71 """ 72 73 if not params: 74 return '' 75 76 # PERF: This is faster than a list comprehension and join, mainly 77 # because it allows us to inline the value transform. 78 query_str = '?' 79 for k, v in params.items(): 80 if v is True: 81 v = 'true' 82 elif v is False: 83 v = 'false' 84 else: 85 v = str(v) 86 87 query_str += k + '=' + v + '&' 88 89 return query_str[:-1] 90 91 92 def percent_escape(url): 93 """Percent-escape reserved characters in the given url. 94 95 Args: 96 url: A full or relative URL. 97 98 Returns: 99 An escaped version of the URL, excluding '/', ',' and ':' 100 characters. In Python 2, unicode URL strings will be first 101 encoded to a UTF-8 byte string to work around a urllib 102 bug. 103 """ 104 105 # Convert the string so that urllib.quote does not complain 106 # if it actually has Unicode chars in it. 107 if not six.PY3 and isinstance(url, six.text_type): # pragma nocover 108 url = url.encode('utf-8') 109 110 return url_quote(url, safe='/:,=?&-_') 111 [end of falcon/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/falcon/util.py b/falcon/util.py --- a/falcon/util.py +++ b/falcon/util.py @@ -63,7 +63,8 @@ Args: params: dict of simple key-value types, where key is a string and value is a string or something that can be converted into a - string. + string. If value is a list, it will be converted to a comma- + delimited string (e.g., thing=1,2,3) Returns: A URI query string starting with '?', or and empty string if there @@ -81,6 +82,8 @@ v = 'true' elif v is False: v = 'false' + elif isinstance(v, list): + v = ','.join([str(i) for i in v]) else: v = str(v)
{"golden_diff": "diff --git a/falcon/util.py b/falcon/util.py\n--- a/falcon/util.py\n+++ b/falcon/util.py\n@@ -63,7 +63,8 @@\n Args:\n params: dict of simple key-value types, where key is a string and\n value is a string or something that can be converted into a\n- string.\n+ string. If value is a list, it will be converted to a comma-\n+ delimited string (e.g., thing=1,2,3)\n \n Returns:\n A URI query string starting with '?', or and empty string if there\n@@ -81,6 +82,8 @@\n v = 'true'\n elif v is False:\n v = 'false'\n+ elif isinstance(v, list):\n+ v = ','.join([str(i) for i in v])\n else:\n v = str(v)\n", "issue": "Update to_query_str to intelligently handle lists\n\n", "before_files": [{"content": "\"\"\"Defines Falcon utility functions\n\nCopyright 2013 by Rackspace Hosting, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport datetime\nimport six\n\nif six.PY3: # pragma nocover\n from urllib.parse import quote as url_quote\nelse: # pragma nocover\n from urllib import quote as url_quote\n\n\n__all__ = ('dt_to_http', 'http_date_to_dt', 'to_query_str', 'percent_escape')\n\n\ndef dt_to_http(dt):\n \"\"\"Converts a datetime instance to an HTTP date string.\n\n Args:\n dt: A datetime object, assumed to be UTC\n\n Returns:\n An HTTP date string, e.g., \"Tue, 15 Nov 1994 12:45:26 GMT\". See\n also: http://goo.gl/R7So4\n \"\"\"\n\n # Tue, 15 Nov 1994 12:45:26 GMT\n return dt.strftime('%a, %d %b %Y %H:%M:%S GMT')\n\n\ndef http_date_to_dt(http_date):\n \"\"\"Converts an HTTP date string to a datetime instance.\n\n Args:\n http_date: An HTTP date string, e.g., \"Tue, 15 Nov 1994 12:45:26 GMT\".\n\n Returns:\n A UTC datetime instance corresponding to the given HTTP date.\n \"\"\"\n\n return datetime.datetime.strptime(\n http_date, '%a, %d %b %Y %H:%M:%S %Z')\n\n\ndef to_query_str(params):\n \"\"\"Converts a dict of params to afaln actual query string.\n\n Args:\n params: dict of simple key-value types, where key is a string and\n value is a string or something that can be converted into a\n string.\n\n Returns:\n A URI query string starting with '?', or and empty string if there\n are no params (the dict is empty).\n \"\"\"\n\n if not params:\n return ''\n\n # PERF: This is faster than a list comprehension and join, mainly\n # because it allows us to inline the value transform.\n query_str = '?'\n for k, v in params.items():\n if v is True:\n v = 'true'\n elif v is False:\n v = 'false'\n else:\n v = str(v)\n\n query_str += k + '=' + v + '&'\n\n return query_str[:-1]\n\n\ndef percent_escape(url):\n \"\"\"Percent-escape reserved characters in the given url.\n\n Args:\n url: A full or relative URL.\n\n Returns:\n An escaped version of the URL, excluding '/', ',' and ':'\n characters. In Python 2, unicode URL strings will be first\n encoded to a UTF-8 byte string to work around a urllib\n bug.\n \"\"\"\n\n # Convert the string so that urllib.quote does not complain\n # if it actually has Unicode chars in it.\n if not six.PY3 and isinstance(url, six.text_type): # pragma nocover\n url = url.encode('utf-8')\n\n return url_quote(url, safe='/:,=?&-_')\n", "path": "falcon/util.py"}]}
1,577
195
gh_patches_debug_6563
rasdani/github-patches
git_diff
cornellius-gp__gpytorch-1195
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug] Cannot serialize/deserialize SmoothedBoxPrior when some args are broadcast # 🐛 Bug It seems like `SmoothedBoxPrior` for >1d doesn't work with serialization/deserialization when only some args are broadcast. ## To reproduce ```python import torch import gpytorch pr = gpytorch.priors.SmoothedBoxPrior(torch.zeros(2), torch.ones(2)) pr.load_state_dict(pr.state_dict()) ``` ** Stack trace/error message ** ``` --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-5-6b4b2e881beb> in <module> 2 import gpytorch 3 pr = gpytorch.priors.SmoothedBoxPrior(torch.zeros(2), torch.ones(2)) ----> 4 pr.load_state_dict(pr.state_dict()) <...PATH..>/torch/nn/modules/module.py in load_state_dict(self, state_dict, strict) 877 if len(error_msgs) > 0: 878 raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( --> 879 self.__class__.__name__, "\n\t".join(error_msgs))) 880 return _IncompatibleKeys(missing_keys, unexpected_keys) 881 RuntimeError: Error(s) in loading state_dict for SmoothedBoxPrior: While copying the parameter named "sigma", whose dimensions in the model are torch.Size([2]) and whose dimensions in the checkpoint are torch.Size([2]), an exception occured : ('unsupported operation: more than one element of the written-to tensor refers to a single memory location. Please clone() the tensor before performing the operation.',). ``` Note that `SmoothedBoxPrior(a=torch.zeros(2), b=torch.ones(2), sigma=torch.ones(2)*0.01)` succeeds, as does `gpytorch.priors.GammaPrior(torch.ones(2),1)`. ## Expected Behavior Successful load. ## System information **Please complete the following information:** - gpytorch version: 1.1.1 - pytorch version: 1.5.0 - OS: tested on Centos and Mac OSX. </issue> <code> [start of gpytorch/priors/smoothed_box_prior.py] 1 #!/usr/bin/env python3 2 3 import math 4 from numbers import Number 5 6 import torch 7 from torch.distributions import constraints 8 from torch.distributions.utils import broadcast_all 9 from torch.nn import Module as TModule 10 11 from .prior import Prior 12 from .torch_priors import NormalPrior 13 14 15 class SmoothedBoxPrior(Prior): 16 r"""A smoothed approximation of a uniform prior. 17 18 Has full support on the reals and is differentiable everywhere. 19 20 .. math:: 21 22 \begin{equation*} 23 B = {x: a_i <= x_i <= b_i} 24 d(x, B) = min_{x' in B} |x - x'| 25 pdf(x) ~ exp(- d(x, B)**2 / sqrt(2 * sigma^2)) 26 \end{equation*} 27 28 """ 29 30 arg_constraints = {"sigma": constraints.positive, "a": constraints.real, "b": constraints.real} 31 support = constraints.real 32 _validate_args = True 33 34 def __init__(self, a, b, sigma=0.01, validate_args=False, transform=None): 35 TModule.__init__(self) 36 _a = torch.tensor(float(a)) if isinstance(a, Number) else a 37 _a = _a.view(-1) if _a.dim() < 1 else _a 38 _a, _b, _sigma = broadcast_all(_a, b, sigma) 39 if not torch.all(constraints.less_than(_b).check(_a)): 40 raise ValueError("must have that a < b (element-wise)") 41 # TODO: Proper argument validation including broadcasting 42 batch_shape, event_shape = _a.shape[:-1], _a.shape[-1:] 43 # need to assign values before registering as buffers to make argument validation work 44 self.a, self.b, self.sigma = _a, _b, _sigma 45 super(SmoothedBoxPrior, self).__init__(batch_shape, event_shape, validate_args=validate_args) 46 # now need to delete to be able to register buffer 47 del self.a, self.b, self.sigma 48 self.register_buffer("a", _a) 49 self.register_buffer("b", _b) 50 self.register_buffer("sigma", _sigma) 51 self.tails = NormalPrior(torch.zeros_like(_a), _sigma, validate_args=validate_args) 52 self._transform = transform 53 54 @property 55 def _c(self): 56 return (self.a + self.b) / 2 57 58 @property 59 def _r(self): 60 return (self.b - self.a) / 2 61 62 @property 63 def _M(self): 64 # normalization factor to make this a probability distribution 65 return torch.log(1 + (self.b - self.a) / (math.sqrt(2 * math.pi) * self.sigma)) 66 67 def log_prob(self, x): 68 return self._log_prob(self.transform(x)) 69 70 def _log_prob(self, x): 71 # x = "distances from box`" 72 X = ((x - self._c).abs_() - self._r).clamp(min=0) 73 return (self.tails.log_prob(X) - self._M).sum(-1) 74 [end of gpytorch/priors/smoothed_box_prior.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gpytorch/priors/smoothed_box_prior.py b/gpytorch/priors/smoothed_box_prior.py --- a/gpytorch/priors/smoothed_box_prior.py +++ b/gpytorch/priors/smoothed_box_prior.py @@ -47,7 +47,7 @@ del self.a, self.b, self.sigma self.register_buffer("a", _a) self.register_buffer("b", _b) - self.register_buffer("sigma", _sigma) + self.register_buffer("sigma", _sigma.clone()) self.tails = NormalPrior(torch.zeros_like(_a), _sigma, validate_args=validate_args) self._transform = transform
{"golden_diff": "diff --git a/gpytorch/priors/smoothed_box_prior.py b/gpytorch/priors/smoothed_box_prior.py\n--- a/gpytorch/priors/smoothed_box_prior.py\n+++ b/gpytorch/priors/smoothed_box_prior.py\n@@ -47,7 +47,7 @@\n del self.a, self.b, self.sigma\n self.register_buffer(\"a\", _a)\n self.register_buffer(\"b\", _b)\n- self.register_buffer(\"sigma\", _sigma)\n+ self.register_buffer(\"sigma\", _sigma.clone())\n self.tails = NormalPrior(torch.zeros_like(_a), _sigma, validate_args=validate_args)\n self._transform = transform\n", "issue": "[Bug] Cannot serialize/deserialize SmoothedBoxPrior when some args are broadcast\n# \ud83d\udc1b Bug\r\n\r\nIt seems like `SmoothedBoxPrior` for >1d doesn't work with serialization/deserialization when only some args are broadcast. \r\n\r\n## To reproduce\r\n\r\n```python\r\nimport torch\r\nimport gpytorch\r\npr = gpytorch.priors.SmoothedBoxPrior(torch.zeros(2), torch.ones(2))\r\npr.load_state_dict(pr.state_dict())\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\n---------------------------------------------------------------------------\r\nRuntimeError Traceback (most recent call last)\r\n<ipython-input-5-6b4b2e881beb> in <module>\r\n 2 import gpytorch\r\n 3 pr = gpytorch.priors.SmoothedBoxPrior(torch.zeros(2), torch.ones(2))\r\n----> 4 pr.load_state_dict(pr.state_dict())\r\n\r\n<...PATH..>/torch/nn/modules/module.py in load_state_dict(self, state_dict, strict)\r\n 877 if len(error_msgs) > 0:\r\n 878 raise RuntimeError('Error(s) in loading state_dict for {}:\\n\\t{}'.format(\r\n--> 879 self.__class__.__name__, \"\\n\\t\".join(error_msgs)))\r\n 880 return _IncompatibleKeys(missing_keys, unexpected_keys)\r\n 881 \r\n\r\nRuntimeError: Error(s) in loading state_dict for SmoothedBoxPrior:\r\n\tWhile copying the parameter named \"sigma\", whose dimensions in the model are torch.Size([2]) and whose dimensions in the checkpoint are torch.Size([2]), an exception occured : ('unsupported operation: more than one element of the written-to tensor refers to a single memory location. Please clone() the tensor before performing the operation.',).\r\n\r\n```\r\n\r\nNote that `SmoothedBoxPrior(a=torch.zeros(2), b=torch.ones(2), sigma=torch.ones(2)*0.01)` succeeds, as does `gpytorch.priors.GammaPrior(torch.ones(2),1)`.\r\n\r\n## Expected Behavior\r\n\r\nSuccessful load. \r\n\r\n## System information\r\n\r\n**Please complete the following information:**\r\n- gpytorch version: 1.1.1\r\n- pytorch version: 1.5.0\r\n- OS: tested on Centos and Mac OSX. \r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport math\nfrom numbers import Number\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.utils import broadcast_all\nfrom torch.nn import Module as TModule\n\nfrom .prior import Prior\nfrom .torch_priors import NormalPrior\n\n\nclass SmoothedBoxPrior(Prior):\n r\"\"\"A smoothed approximation of a uniform prior.\n\n Has full support on the reals and is differentiable everywhere.\n\n .. math::\n\n \\begin{equation*}\n B = {x: a_i <= x_i <= b_i}\n d(x, B) = min_{x' in B} |x - x'|\n pdf(x) ~ exp(- d(x, B)**2 / sqrt(2 * sigma^2))\n \\end{equation*}\n\n \"\"\"\n\n arg_constraints = {\"sigma\": constraints.positive, \"a\": constraints.real, \"b\": constraints.real}\n support = constraints.real\n _validate_args = True\n\n def __init__(self, a, b, sigma=0.01, validate_args=False, transform=None):\n TModule.__init__(self)\n _a = torch.tensor(float(a)) if isinstance(a, Number) else a\n _a = _a.view(-1) if _a.dim() < 1 else _a\n _a, _b, _sigma = broadcast_all(_a, b, sigma)\n if not torch.all(constraints.less_than(_b).check(_a)):\n raise ValueError(\"must have that a < b (element-wise)\")\n # TODO: Proper argument validation including broadcasting\n batch_shape, event_shape = _a.shape[:-1], _a.shape[-1:]\n # need to assign values before registering as buffers to make argument validation work\n self.a, self.b, self.sigma = _a, _b, _sigma\n super(SmoothedBoxPrior, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n # now need to delete to be able to register buffer\n del self.a, self.b, self.sigma\n self.register_buffer(\"a\", _a)\n self.register_buffer(\"b\", _b)\n self.register_buffer(\"sigma\", _sigma)\n self.tails = NormalPrior(torch.zeros_like(_a), _sigma, validate_args=validate_args)\n self._transform = transform\n\n @property\n def _c(self):\n return (self.a + self.b) / 2\n\n @property\n def _r(self):\n return (self.b - self.a) / 2\n\n @property\n def _M(self):\n # normalization factor to make this a probability distribution\n return torch.log(1 + (self.b - self.a) / (math.sqrt(2 * math.pi) * self.sigma))\n\n def log_prob(self, x):\n return self._log_prob(self.transform(x))\n\n def _log_prob(self, x):\n # x = \"distances from box`\"\n X = ((x - self._c).abs_() - self._r).clamp(min=0)\n return (self.tails.log_prob(X) - self._M).sum(-1)\n", "path": "gpytorch/priors/smoothed_box_prior.py"}]}
1,869
152
gh_patches_debug_4473
rasdani/github-patches
git_diff
facebookresearch__hydra-1685
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CI fails with UserWarning raised from importing nevergrad example failure here https://app.circleci.com/pipelines/github/facebookresearch/hydra/10584/workflows/d4c57363-bb31-42f4-a7ee-29c28a577f67/jobs/95695 this can be reproduced by simply importing nevergrad ``` >>> import nevergrad as ng /Users/jieru/opt/anaconda3/envs/testnv/lib/python3.8/site-packages/cma/s.py:13: UserWarning: Could not import matplotlib.pyplot, therefore ``cma.plot()`` etc. is not available _warnings.warn('Could not import matplotlib.pyplot, therefore' ``` the warnings comes from one of nevergrad's dependency `cma` which just had a new release https://github.com/CMA-ES/pycma/releases </issue> <code> [start of plugins/hydra_nevergrad_sweeper/setup.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 # type: ignore 3 from pathlib import Path 4 5 from read_version import read_version 6 from setuptools import find_namespace_packages, setup 7 8 setup( 9 name="hydra-nevergrad-sweeper", 10 version=read_version("hydra_plugins/hydra_nevergrad_sweeper", "__init__.py"), 11 author="Jeremy Rapin, Omry Yadan, Jieru Hu", 12 author_email="[email protected], [email protected], [email protected]", 13 description="Hydra Nevergrad Sweeper plugin", 14 long_description=(Path(__file__).parent / "README.md").read_text(), 15 long_description_content_type="text/markdown", 16 url="https://github.com/facebookresearch/hydra/", 17 packages=find_namespace_packages(include=["hydra_plugins.*"]), 18 classifiers=[ 19 "License :: OSI Approved :: MIT License", 20 "Programming Language :: Python :: 3.6", 21 "Programming Language :: Python :: 3.7", 22 "Programming Language :: Python :: 3.8", 23 "Programming Language :: Python :: 3.9", 24 "Operating System :: OS Independent", 25 "Development Status :: 4 - Beta", 26 ], 27 install_requires=[ 28 "hydra-core>=1.1.0.dev7", 29 "nevergrad>=0.4.3.post2", 30 "numpy<1.20.0", # remove once nevergrad is upgraded to support numpy 1.20 31 ], 32 include_package_data=True, 33 ) 34 [end of plugins/hydra_nevergrad_sweeper/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugins/hydra_nevergrad_sweeper/setup.py b/plugins/hydra_nevergrad_sweeper/setup.py --- a/plugins/hydra_nevergrad_sweeper/setup.py +++ b/plugins/hydra_nevergrad_sweeper/setup.py @@ -27,6 +27,7 @@ install_requires=[ "hydra-core>=1.1.0.dev7", "nevergrad>=0.4.3.post2", + "cma==3.0.3", # https://github.com/facebookresearch/hydra/issues/1684 "numpy<1.20.0", # remove once nevergrad is upgraded to support numpy 1.20 ], include_package_data=True,
{"golden_diff": "diff --git a/plugins/hydra_nevergrad_sweeper/setup.py b/plugins/hydra_nevergrad_sweeper/setup.py\n--- a/plugins/hydra_nevergrad_sweeper/setup.py\n+++ b/plugins/hydra_nevergrad_sweeper/setup.py\n@@ -27,6 +27,7 @@\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n \"nevergrad>=0.4.3.post2\",\n+ \"cma==3.0.3\", # https://github.com/facebookresearch/hydra/issues/1684\n \"numpy<1.20.0\", # remove once nevergrad is upgraded to support numpy 1.20\n ],\n include_package_data=True,\n", "issue": "CI fails with UserWarning raised from importing nevergrad\nexample failure here https://app.circleci.com/pipelines/github/facebookresearch/hydra/10584/workflows/d4c57363-bb31-42f4-a7ee-29c28a577f67/jobs/95695\r\n\r\nthis can be reproduced by simply importing nevergrad\r\n```\r\n>>> import nevergrad as ng\r\n/Users/jieru/opt/anaconda3/envs/testnv/lib/python3.8/site-packages/cma/s.py:13: UserWarning: Could not import matplotlib.pyplot, therefore ``cma.plot()`` etc. is not available\r\n _warnings.warn('Could not import matplotlib.pyplot, therefore'\r\n```\r\n\r\nthe warnings comes from one of nevergrad's dependency `cma` which just had a new release https://github.com/CMA-ES/pycma/releases\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom pathlib import Path\n\nfrom read_version import read_version\nfrom setuptools import find_namespace_packages, setup\n\nsetup(\n name=\"hydra-nevergrad-sweeper\",\n version=read_version(\"hydra_plugins/hydra_nevergrad_sweeper\", \"__init__.py\"),\n author=\"Jeremy Rapin, Omry Yadan, Jieru Hu\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Hydra Nevergrad Sweeper plugin\",\n long_description=(Path(__file__).parent / \"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n \"nevergrad>=0.4.3.post2\",\n \"numpy<1.20.0\", # remove once nevergrad is upgraded to support numpy 1.20\n ],\n include_package_data=True,\n)\n", "path": "plugins/hydra_nevergrad_sweeper/setup.py"}]}
1,139
170
gh_patches_debug_2196
rasdani/github-patches
git_diff
getredash__redash-1119
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User should be able to delete an Alert Can't remove Alert with UI. Directly run sql as below. ``` sql delete from alerts where id = 〜 ``` </issue> <code> [start of redash/handlers/alerts.py] 1 import time 2 3 from flask import request 4 from funcy import project 5 6 from redash import models 7 from redash.permissions import require_access, require_admin_or_owner, view_only, require_permission 8 from redash.handlers.base import BaseResource, require_fields, get_object_or_404 9 10 11 class AlertResource(BaseResource): 12 def get(self, alert_id): 13 alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org) 14 require_access(alert.groups, self.current_user, view_only) 15 return alert.to_dict() 16 17 def post(self, alert_id): 18 req = request.get_json(True) 19 params = project(req, ('options', 'name', 'query_id', 'rearm')) 20 alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org) 21 require_admin_or_owner(alert.user.id) 22 23 if 'query_id' in params: 24 params['query'] = params.pop('query_id') 25 26 alert.update_instance(**params) 27 28 self.record_event({ 29 'action': 'edit', 30 'timestamp': int(time.time()), 31 'object_id': alert.id, 32 'object_type': 'alert' 33 }) 34 35 return alert.to_dict() 36 37 38 class AlertListResource(BaseResource): 39 def post(self): 40 req = request.get_json(True) 41 require_fields(req, ('options', 'name', 'query_id')) 42 43 query = models.Query.get_by_id_and_org(req['query_id'], self.current_org) 44 require_access(query.groups, self.current_user, view_only) 45 46 alert = models.Alert.create( 47 name=req['name'], 48 query=query, 49 user=self.current_user, 50 options=req['options'] 51 ) 52 53 self.record_event({ 54 'action': 'create', 55 'timestamp': int(time.time()), 56 'object_id': alert.id, 57 'object_type': 'alert' 58 }) 59 60 return alert.to_dict() 61 62 @require_permission('list_alerts') 63 def get(self): 64 return [alert.to_dict() for alert in models.Alert.all(groups=self.current_user.groups)] 65 66 67 class AlertSubscriptionListResource(BaseResource): 68 def post(self, alert_id): 69 req = request.get_json(True) 70 71 alert = models.Alert.get_by_id_and_org(alert_id, self.current_org) 72 require_access(alert.groups, self.current_user, view_only) 73 kwargs = {'alert': alert, 'user': self.current_user} 74 75 if 'destination_id' in req: 76 destination = models.NotificationDestination.get_by_id_and_org(req['destination_id'], self.current_org) 77 kwargs['destination'] = destination 78 79 subscription = models.AlertSubscription.create(**kwargs) 80 81 self.record_event({ 82 'action': 'subscribe', 83 'timestamp': int(time.time()), 84 'object_id': alert_id, 85 'object_type': 'alert', 86 'destination': req.get('destination_id') 87 }) 88 89 return subscription.to_dict() 90 91 def get(self, alert_id): 92 alert = models.Alert.get_by_id_and_org(alert_id, self.current_org) 93 require_access(alert.groups, self.current_user, view_only) 94 95 subscriptions = models.AlertSubscription.all(alert_id) 96 return [s.to_dict() for s in subscriptions] 97 98 99 class AlertSubscriptionResource(BaseResource): 100 def delete(self, alert_id, subscriber_id): 101 102 subscription = get_object_or_404(models.AlertSubscription.get_by_id, subscriber_id) 103 require_admin_or_owner(subscription.user.id) 104 subscription.delete_instance() 105 106 self.record_event({ 107 'action': 'unsubscribe', 108 'timestamp': int(time.time()), 109 'object_id': alert_id, 110 'object_type': 'alert' 111 }) 112 113 [end of redash/handlers/alerts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redash/handlers/alerts.py b/redash/handlers/alerts.py --- a/redash/handlers/alerts.py +++ b/redash/handlers/alerts.py @@ -34,6 +34,11 @@ return alert.to_dict() + def delete(self, alert_id): + alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org) + require_admin_or_owner(alert.user.id) + alert.delete_instance(recursive=True) + class AlertListResource(BaseResource): def post(self):
{"golden_diff": "diff --git a/redash/handlers/alerts.py b/redash/handlers/alerts.py\n--- a/redash/handlers/alerts.py\n+++ b/redash/handlers/alerts.py\n@@ -34,6 +34,11 @@\n \n return alert.to_dict()\n \n+ def delete(self, alert_id):\n+ alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)\n+ require_admin_or_owner(alert.user.id)\n+ alert.delete_instance(recursive=True)\n+\n \n class AlertListResource(BaseResource):\n def post(self):\n", "issue": "User should be able to delete an Alert\nCan't remove Alert with UI.\n\nDirectly run sql as below.\n\n``` sql\ndelete from alerts where id = \u301c\n```\n\n", "before_files": [{"content": "import time\n\nfrom flask import request\nfrom funcy import project\n\nfrom redash import models\nfrom redash.permissions import require_access, require_admin_or_owner, view_only, require_permission\nfrom redash.handlers.base import BaseResource, require_fields, get_object_or_404\n\n\nclass AlertResource(BaseResource):\n def get(self, alert_id):\n alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n return alert.to_dict()\n\n def post(self, alert_id):\n req = request.get_json(True)\n params = project(req, ('options', 'name', 'query_id', 'rearm'))\n alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)\n require_admin_or_owner(alert.user.id)\n\n if 'query_id' in params:\n params['query'] = params.pop('query_id')\n\n alert.update_instance(**params)\n\n self.record_event({\n 'action': 'edit',\n 'timestamp': int(time.time()),\n 'object_id': alert.id,\n 'object_type': 'alert'\n })\n\n return alert.to_dict()\n\n\nclass AlertListResource(BaseResource):\n def post(self):\n req = request.get_json(True)\n require_fields(req, ('options', 'name', 'query_id'))\n\n query = models.Query.get_by_id_and_org(req['query_id'], self.current_org)\n require_access(query.groups, self.current_user, view_only)\n\n alert = models.Alert.create(\n name=req['name'],\n query=query,\n user=self.current_user,\n options=req['options']\n )\n\n self.record_event({\n 'action': 'create',\n 'timestamp': int(time.time()),\n 'object_id': alert.id,\n 'object_type': 'alert'\n })\n\n return alert.to_dict()\n\n @require_permission('list_alerts')\n def get(self):\n return [alert.to_dict() for alert in models.Alert.all(groups=self.current_user.groups)]\n\n\nclass AlertSubscriptionListResource(BaseResource):\n def post(self, alert_id):\n req = request.get_json(True)\n\n alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n kwargs = {'alert': alert, 'user': self.current_user}\n\n if 'destination_id' in req:\n destination = models.NotificationDestination.get_by_id_and_org(req['destination_id'], self.current_org)\n kwargs['destination'] = destination\n\n subscription = models.AlertSubscription.create(**kwargs)\n\n self.record_event({\n 'action': 'subscribe',\n 'timestamp': int(time.time()),\n 'object_id': alert_id,\n 'object_type': 'alert',\n 'destination': req.get('destination_id')\n })\n\n return subscription.to_dict()\n\n def get(self, alert_id):\n alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)\n require_access(alert.groups, self.current_user, view_only)\n\n subscriptions = models.AlertSubscription.all(alert_id)\n return [s.to_dict() for s in subscriptions]\n\n\nclass AlertSubscriptionResource(BaseResource):\n def delete(self, alert_id, subscriber_id):\n \n subscription = get_object_or_404(models.AlertSubscription.get_by_id, subscriber_id)\n require_admin_or_owner(subscription.user.id)\n subscription.delete_instance()\n\n self.record_event({\n 'action': 'unsubscribe',\n 'timestamp': int(time.time()),\n 'object_id': alert_id,\n 'object_type': 'alert'\n })\n\n", "path": "redash/handlers/alerts.py"}]}
1,595
134
gh_patches_debug_4407
rasdani/github-patches
git_diff
pwndbg__pwndbg-1222
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ipi doesn't work with multiline inputs TL;DR: <img width="550" alt="image" src="https://user-images.githubusercontent.com/10009354/193942063-af410d4d-3cdd-4bcb-a102-9bb87d101656.png"> ``` pwndbg> ipi In [1]: from ctypes import * In [2]: class A(LittleEndianStructure): ...: a = LittleEndianStructure ...: --------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-2-814bd2a1d7ec> in <module> ----> 1 class A(LittleEndianStructure): 2 a = LittleEndianStructure 3 <ipython-input-2-814bd2a1d7ec> in A() 1 class A(LittleEndianStructure): ----> 2 a = LittleEndianStructure 3 NameError: name 'LittleEndianStructure' is not defined In [3]: ctypes --------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-3-8c9cdb26e3f3> in <module> ----> 1 ctypes NameError: name 'ctypes' is not defined In [4]: LittleEndianStructure Out[4]: _ctypes.Structure In [5]: def foo(): ...: return LittleEndianStructure ...: In [6]: foo() --------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-6-c19b6d9633cf> in <module> ----> 1 foo() <ipython-input-5-0b19aa36e370> in foo() 1 def foo(): ----> 2 return LittleEndianStructure 3 NameError: name 'LittleEndianStructure' is not defined In [7]: ``` </issue> <code> [start of pwndbg/commands/ipython_interactive.py] 1 """ 2 Command to start an interactive IPython prompt. 3 """ 4 import sys 5 from contextlib import contextmanager 6 7 import gdb 8 9 import pwndbg.color.message as M 10 import pwndbg.commands 11 import pwndbg.lib.stdio 12 13 14 @contextmanager 15 def switch_to_ipython_env(): 16 """We need to change stdout/stderr to the default ones, otherwise we can't use tab or autocomplete""" 17 # Save GDB's excepthook 18 saved_excepthook = sys.excepthook 19 # Switch to default stdout/stderr 20 with pwndbg.lib.stdio.stdio: 21 yield 22 # Restore Python's default ps1, ps2, and excepthook for GDB's `pi` command 23 sys.ps1 = ">>> " 24 sys.ps2 = "... " 25 sys.excepthook = saved_excepthook 26 27 28 @pwndbg.commands.ArgparsedCommand("Start an interactive IPython prompt.") 29 def ipi(): 30 with switch_to_ipython_env(): 31 # Use `gdb.execute` to embed IPython into GDB's variable scope 32 try: 33 gdb.execute("pi import IPython") 34 except gdb.error: 35 print( 36 M.warn( 37 "Cannot import IPython.\n" 38 "You need to install IPython if you want to use this command.\n" 39 "Maybe you can try `pip install ipython` first." 40 ) 41 ) 42 return 43 code4ipython = """import jedi 44 import pwn 45 jedi.Interpreter._allow_descriptor_getattr_default = False 46 IPython.embed(colors='neutral',banner1='',confirm_exit=False,simple_prompt=False) 47 """ 48 gdb.execute(f"py\n{code4ipython}") 49 [end of pwndbg/commands/ipython_interactive.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/commands/ipython_interactive.py b/pwndbg/commands/ipython_interactive.py --- a/pwndbg/commands/ipython_interactive.py +++ b/pwndbg/commands/ipython_interactive.py @@ -43,6 +43,6 @@ code4ipython = """import jedi import pwn jedi.Interpreter._allow_descriptor_getattr_default = False -IPython.embed(colors='neutral',banner1='',confirm_exit=False,simple_prompt=False) +IPython.embed(colors='neutral',banner1='',confirm_exit=False,simple_prompt=False, user_ns=globals()) """ gdb.execute(f"py\n{code4ipython}")
{"golden_diff": "diff --git a/pwndbg/commands/ipython_interactive.py b/pwndbg/commands/ipython_interactive.py\n--- a/pwndbg/commands/ipython_interactive.py\n+++ b/pwndbg/commands/ipython_interactive.py\n@@ -43,6 +43,6 @@\n code4ipython = \"\"\"import jedi\n import pwn\n jedi.Interpreter._allow_descriptor_getattr_default = False\n-IPython.embed(colors='neutral',banner1='',confirm_exit=False,simple_prompt=False)\n+IPython.embed(colors='neutral',banner1='',confirm_exit=False,simple_prompt=False, user_ns=globals())\n \"\"\"\n gdb.execute(f\"py\\n{code4ipython}\")\n", "issue": "ipi doesn't work with multiline inputs\nTL;DR:\r\n<img width=\"550\" alt=\"image\" src=\"https://user-images.githubusercontent.com/10009354/193942063-af410d4d-3cdd-4bcb-a102-9bb87d101656.png\">\r\n\r\n```\r\npwndbg> ipi\r\n\r\nIn [1]: from ctypes import *\r\n\r\nIn [2]: class A(LittleEndianStructure):\r\n ...: a = LittleEndianStructure\r\n ...:\r\n---------------------------------------------------------------------------\r\nNameError Traceback (most recent call last)\r\n<ipython-input-2-814bd2a1d7ec> in <module>\r\n----> 1 class A(LittleEndianStructure):\r\n 2 a = LittleEndianStructure\r\n 3\r\n\r\n<ipython-input-2-814bd2a1d7ec> in A()\r\n 1 class A(LittleEndianStructure):\r\n----> 2 a = LittleEndianStructure\r\n 3\r\n\r\nNameError: name 'LittleEndianStructure' is not defined\r\n\r\nIn [3]: ctypes\r\n---------------------------------------------------------------------------\r\nNameError Traceback (most recent call last)\r\n<ipython-input-3-8c9cdb26e3f3> in <module>\r\n----> 1 ctypes\r\n\r\nNameError: name 'ctypes' is not defined\r\n\r\nIn [4]: LittleEndianStructure\r\nOut[4]: _ctypes.Structure\r\n\r\nIn [5]: def foo():\r\n ...: return LittleEndianStructure\r\n ...:\r\n\r\nIn [6]: foo()\r\n---------------------------------------------------------------------------\r\nNameError Traceback (most recent call last)\r\n<ipython-input-6-c19b6d9633cf> in <module>\r\n----> 1 foo()\r\n\r\n<ipython-input-5-0b19aa36e370> in foo()\r\n 1 def foo():\r\n----> 2 return LittleEndianStructure\r\n 3\r\n\r\nNameError: name 'LittleEndianStructure' is not defined\r\n\r\nIn [7]:\r\n```\n", "before_files": [{"content": "\"\"\"\nCommand to start an interactive IPython prompt.\n\"\"\"\nimport sys\nfrom contextlib import contextmanager\n\nimport gdb\n\nimport pwndbg.color.message as M\nimport pwndbg.commands\nimport pwndbg.lib.stdio\n\n\n@contextmanager\ndef switch_to_ipython_env():\n \"\"\"We need to change stdout/stderr to the default ones, otherwise we can't use tab or autocomplete\"\"\"\n # Save GDB's excepthook\n saved_excepthook = sys.excepthook\n # Switch to default stdout/stderr\n with pwndbg.lib.stdio.stdio:\n yield\n # Restore Python's default ps1, ps2, and excepthook for GDB's `pi` command\n sys.ps1 = \">>> \"\n sys.ps2 = \"... \"\n sys.excepthook = saved_excepthook\n\n\[email protected](\"Start an interactive IPython prompt.\")\ndef ipi():\n with switch_to_ipython_env():\n # Use `gdb.execute` to embed IPython into GDB's variable scope\n try:\n gdb.execute(\"pi import IPython\")\n except gdb.error:\n print(\n M.warn(\n \"Cannot import IPython.\\n\"\n \"You need to install IPython if you want to use this command.\\n\"\n \"Maybe you can try `pip install ipython` first.\"\n )\n )\n return\n code4ipython = \"\"\"import jedi\nimport pwn\njedi.Interpreter._allow_descriptor_getattr_default = False\nIPython.embed(colors='neutral',banner1='',confirm_exit=False,simple_prompt=False)\n\"\"\"\n gdb.execute(f\"py\\n{code4ipython}\")\n", "path": "pwndbg/commands/ipython_interactive.py"}]}
1,451
154
gh_patches_debug_38768
rasdani/github-patches
git_diff
sublimelsp__LSP-717
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Formatting adding trailing newline clears last line * OS and language server Linux + Gopls * How you installed LSP (Package Control or from git?) Package Control * Minimal reproduction steps ```go package main import ( "fmt" ) func main() { fmt.Println("Hello, world") } // No newline! ``` Format ```go package main import ( "fmt" ) func main() { fmt.Println("Hello, world") ``` * Log No diagnostic output. Initially reported to [gopls](https://github.com/golang/go/issues/33717), but they pointed out that the gopls commandline does the right thing. Is this a LSP issue or Sublime itself? Let me know if I can provide any other helpful information! </issue> <code> [start of plugin/edit.py] 1 import sublime 2 import sublime_plugin 3 from .core.edit import sort_by_application_order 4 try: 5 from typing import List, Dict, Optional, Any, Iterable, Tuple 6 from .core.edit import TextEdit 7 assert List and Dict and Optional and Any and Iterable and Tuple and TextEdit 8 except ImportError: 9 pass 10 from .core.logging import debug 11 12 13 class LspApplyWorkspaceEditCommand(sublime_plugin.WindowCommand): 14 def run(self, changes: 'Optional[Dict[str, List[TextEdit]]]' = None): 15 documents_changed = 0 16 if changes: 17 for path, document_changes in changes.items(): 18 self.open_and_apply_edits(path, document_changes) 19 documents_changed += 1 20 21 if documents_changed > 0: 22 message = 'Applied changes to {} documents'.format(documents_changed) 23 self.window.status_message(message) 24 else: 25 self.window.status_message('No changes to apply to workspace') 26 27 def open_and_apply_edits(self, path, file_changes): 28 view = self.window.open_file(path) 29 if view: 30 if view.is_loading(): 31 # TODO: wait for event instead. 32 sublime.set_timeout_async( 33 lambda: view.run_command('lsp_apply_document_edit', {'changes': file_changes}), 34 500 35 ) 36 else: 37 view.run_command('lsp_apply_document_edit', 38 {'changes': file_changes}) 39 else: 40 debug('view not found to apply', path, file_changes) 41 42 43 class LspApplyDocumentEditCommand(sublime_plugin.TextCommand): 44 def run(self, edit, changes: 'Optional[List[TextEdit]]' = None): 45 # Apply the changes in reverse, so that we don't invalidate the range 46 # of any change that we haven't applied yet. 47 if changes: 48 for change in sort_by_application_order(changes): 49 start, end, newText = change 50 region = sublime.Region(self.view.text_point(*start), self.view.text_point(*end)) 51 self.apply_change(region, newText, edit) 52 53 def apply_change(self, region: 'sublime.Region', newText: str, edit): 54 if region.empty(): 55 self.view.insert(edit, region.a, newText) 56 else: 57 if len(newText) > 0: 58 self.view.replace(edit, region, newText) 59 else: 60 self.view.erase(edit, region) 61 [end of plugin/edit.py] [start of plugin/core/edit.py] 1 from .url import uri_to_filename 2 try: 3 from typing import List, Dict, Optional, Any, Iterable, Tuple 4 TextEdit = Tuple[Tuple[int, int], Tuple[int, int], str] 5 assert List and Dict and Optional and Any and Iterable and Tuple 6 except ImportError: 7 pass 8 9 10 def parse_workspace_edit(workspace_edit: 'Dict[str, Any]') -> 'Dict[str, List[TextEdit]]': 11 changes = {} # type: Dict[str, List[TextEdit]] 12 if 'changes' in workspace_edit: 13 for uri, file_changes in workspace_edit.get('changes', {}).items(): 14 changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes) 15 if 'documentChanges' in workspace_edit: 16 for document_change in workspace_edit.get('documentChanges', []): 17 uri = document_change.get('textDocument').get('uri') 18 changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in document_change.get('edits')) 19 return changes 20 21 22 def parse_range(range: 'Dict[str, int]') -> 'Tuple[int, int]': 23 return range['line'], range['character'] 24 25 26 def parse_text_edit(text_edit: 'Dict[str, Any]') -> 'TextEdit': 27 return ( 28 parse_range(text_edit['range']['start']), 29 parse_range(text_edit['range']['end']), 30 text_edit.get('newText', '') 31 ) 32 33 34 def sort_by_application_order(changes: 'Iterable[TextEdit]') -> 'List[TextEdit]': 35 36 def get_start_position(pair: 'Tuple[int, TextEdit]'): 37 index, change = pair 38 return change[0][0], change[0][1], index 39 40 # The spec reads: 41 # > However, it is possible that multiple edits have the same start position: multiple 42 # > inserts, or any number of inserts followed by a single remove or replace edit. If 43 # > multiple inserts have the same position, the order in the array defines the order in 44 # > which the inserted strings appear in the resulting text. 45 # So we sort by start position. But if multiple text edits start at the same position, 46 # we use the index in the array as the key. 47 48 return list(map(lambda pair: pair[1], sorted(enumerate(changes), key=get_start_position, reverse=True))) 49 [end of plugin/core/edit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugin/core/edit.py b/plugin/core/edit.py --- a/plugin/core/edit.py +++ b/plugin/core/edit.py @@ -1,4 +1,5 @@ from .url import uri_to_filename +import operator try: from typing import List, Dict, Optional, Any, Iterable, Tuple TextEdit = Tuple[Tuple[int, int], Tuple[int, int], str] @@ -32,11 +33,6 @@ def sort_by_application_order(changes: 'Iterable[TextEdit]') -> 'List[TextEdit]': - - def get_start_position(pair: 'Tuple[int, TextEdit]'): - index, change = pair - return change[0][0], change[0][1], index - # The spec reads: # > However, it is possible that multiple edits have the same start position: multiple # > inserts, or any number of inserts followed by a single remove or replace edit. If @@ -45,4 +41,4 @@ # So we sort by start position. But if multiple text edits start at the same position, # we use the index in the array as the key. - return list(map(lambda pair: pair[1], sorted(enumerate(changes), key=get_start_position, reverse=True))) + return list(sorted(changes, key=operator.itemgetter(0))) diff --git a/plugin/edit.py b/plugin/edit.py --- a/plugin/edit.py +++ b/plugin/edit.py @@ -41,14 +41,24 @@ class LspApplyDocumentEditCommand(sublime_plugin.TextCommand): + def run(self, edit, changes: 'Optional[List[TextEdit]]' = None): # Apply the changes in reverse, so that we don't invalidate the range # of any change that we haven't applied yet. if changes: - for change in sort_by_application_order(changes): + last_row, last_col = self.view.rowcol(self.view.size()) + for change in reversed(sort_by_application_order(changes)): start, end, newText = change region = sublime.Region(self.view.text_point(*start), self.view.text_point(*end)) - self.apply_change(region, newText, edit) + + if start[0] > last_row and newText[0] != '\n': + # Handle when a language server (eg gopls) inserts at a row beyond the document + # some editors create the line automatically, sublime needs to have the newline prepended. + debug('adding new line for edit at line {}, document ended at line {}'.format(start[0], last_row)) + self.apply_change(region, '\n' + newText, edit) + last_row, last_col = self.view.rowcol(self.view.size()) + else: + self.apply_change(region, newText, edit) def apply_change(self, region: 'sublime.Region', newText: str, edit): if region.empty():
{"golden_diff": "diff --git a/plugin/core/edit.py b/plugin/core/edit.py\n--- a/plugin/core/edit.py\n+++ b/plugin/core/edit.py\n@@ -1,4 +1,5 @@\n from .url import uri_to_filename\n+import operator\n try:\n from typing import List, Dict, Optional, Any, Iterable, Tuple\n TextEdit = Tuple[Tuple[int, int], Tuple[int, int], str]\n@@ -32,11 +33,6 @@\n \n \n def sort_by_application_order(changes: 'Iterable[TextEdit]') -> 'List[TextEdit]':\n-\n- def get_start_position(pair: 'Tuple[int, TextEdit]'):\n- index, change = pair\n- return change[0][0], change[0][1], index\n-\n # The spec reads:\n # > However, it is possible that multiple edits have the same start position: multiple\n # > inserts, or any number of inserts followed by a single remove or replace edit. If\n@@ -45,4 +41,4 @@\n # So we sort by start position. But if multiple text edits start at the same position,\n # we use the index in the array as the key.\n \n- return list(map(lambda pair: pair[1], sorted(enumerate(changes), key=get_start_position, reverse=True)))\n+ return list(sorted(changes, key=operator.itemgetter(0)))\ndiff --git a/plugin/edit.py b/plugin/edit.py\n--- a/plugin/edit.py\n+++ b/plugin/edit.py\n@@ -41,14 +41,24 @@\n \n \n class LspApplyDocumentEditCommand(sublime_plugin.TextCommand):\n+\n def run(self, edit, changes: 'Optional[List[TextEdit]]' = None):\n # Apply the changes in reverse, so that we don't invalidate the range\n # of any change that we haven't applied yet.\n if changes:\n- for change in sort_by_application_order(changes):\n+ last_row, last_col = self.view.rowcol(self.view.size())\n+ for change in reversed(sort_by_application_order(changes)):\n start, end, newText = change\n region = sublime.Region(self.view.text_point(*start), self.view.text_point(*end))\n- self.apply_change(region, newText, edit)\n+\n+ if start[0] > last_row and newText[0] != '\\n':\n+ # Handle when a language server (eg gopls) inserts at a row beyond the document\n+ # some editors create the line automatically, sublime needs to have the newline prepended.\n+ debug('adding new line for edit at line {}, document ended at line {}'.format(start[0], last_row))\n+ self.apply_change(region, '\\n' + newText, edit)\n+ last_row, last_col = self.view.rowcol(self.view.size())\n+ else:\n+ self.apply_change(region, newText, edit)\n \n def apply_change(self, region: 'sublime.Region', newText: str, edit):\n if region.empty():\n", "issue": "Formatting adding trailing newline clears last line\n* OS and language server\r\nLinux + Gopls\r\n* How you installed LSP (Package Control or from git?)\r\nPackage Control\r\n* Minimal reproduction steps\r\n```go\r\npackage main\r\n\r\nimport (\r\n\t\"fmt\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Println(\"Hello, world\")\r\n} // No newline!\r\n```\r\n\r\nFormat\r\n\r\n```go\r\npackage main\r\n\r\nimport (\r\n\t\"fmt\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Println(\"Hello, world\")\r\n```\r\n* Log\r\nNo diagnostic output. \r\n\r\nInitially reported to [gopls](https://github.com/golang/go/issues/33717), but they pointed out that the gopls commandline does the right thing.\r\n\r\nIs this a LSP issue or Sublime itself?\r\nLet me know if I can provide any other helpful information!\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\nfrom .core.edit import sort_by_application_order\ntry:\n from typing import List, Dict, Optional, Any, Iterable, Tuple\n from .core.edit import TextEdit\n assert List and Dict and Optional and Any and Iterable and Tuple and TextEdit\nexcept ImportError:\n pass\nfrom .core.logging import debug\n\n\nclass LspApplyWorkspaceEditCommand(sublime_plugin.WindowCommand):\n def run(self, changes: 'Optional[Dict[str, List[TextEdit]]]' = None):\n documents_changed = 0\n if changes:\n for path, document_changes in changes.items():\n self.open_and_apply_edits(path, document_changes)\n documents_changed += 1\n\n if documents_changed > 0:\n message = 'Applied changes to {} documents'.format(documents_changed)\n self.window.status_message(message)\n else:\n self.window.status_message('No changes to apply to workspace')\n\n def open_and_apply_edits(self, path, file_changes):\n view = self.window.open_file(path)\n if view:\n if view.is_loading():\n # TODO: wait for event instead.\n sublime.set_timeout_async(\n lambda: view.run_command('lsp_apply_document_edit', {'changes': file_changes}),\n 500\n )\n else:\n view.run_command('lsp_apply_document_edit',\n {'changes': file_changes})\n else:\n debug('view not found to apply', path, file_changes)\n\n\nclass LspApplyDocumentEditCommand(sublime_plugin.TextCommand):\n def run(self, edit, changes: 'Optional[List[TextEdit]]' = None):\n # Apply the changes in reverse, so that we don't invalidate the range\n # of any change that we haven't applied yet.\n if changes:\n for change in sort_by_application_order(changes):\n start, end, newText = change\n region = sublime.Region(self.view.text_point(*start), self.view.text_point(*end))\n self.apply_change(region, newText, edit)\n\n def apply_change(self, region: 'sublime.Region', newText: str, edit):\n if region.empty():\n self.view.insert(edit, region.a, newText)\n else:\n if len(newText) > 0:\n self.view.replace(edit, region, newText)\n else:\n self.view.erase(edit, region)\n", "path": "plugin/edit.py"}, {"content": "from .url import uri_to_filename\ntry:\n from typing import List, Dict, Optional, Any, Iterable, Tuple\n TextEdit = Tuple[Tuple[int, int], Tuple[int, int], str]\n assert List and Dict and Optional and Any and Iterable and Tuple\nexcept ImportError:\n pass\n\n\ndef parse_workspace_edit(workspace_edit: 'Dict[str, Any]') -> 'Dict[str, List[TextEdit]]':\n changes = {} # type: Dict[str, List[TextEdit]]\n if 'changes' in workspace_edit:\n for uri, file_changes in workspace_edit.get('changes', {}).items():\n changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in file_changes)\n if 'documentChanges' in workspace_edit:\n for document_change in workspace_edit.get('documentChanges', []):\n uri = document_change.get('textDocument').get('uri')\n changes[uri_to_filename(uri)] = list(parse_text_edit(change) for change in document_change.get('edits'))\n return changes\n\n\ndef parse_range(range: 'Dict[str, int]') -> 'Tuple[int, int]':\n return range['line'], range['character']\n\n\ndef parse_text_edit(text_edit: 'Dict[str, Any]') -> 'TextEdit':\n return (\n parse_range(text_edit['range']['start']),\n parse_range(text_edit['range']['end']),\n text_edit.get('newText', '')\n )\n\n\ndef sort_by_application_order(changes: 'Iterable[TextEdit]') -> 'List[TextEdit]':\n\n def get_start_position(pair: 'Tuple[int, TextEdit]'):\n index, change = pair\n return change[0][0], change[0][1], index\n\n # The spec reads:\n # > However, it is possible that multiple edits have the same start position: multiple\n # > inserts, or any number of inserts followed by a single remove or replace edit. If\n # > multiple inserts have the same position, the order in the array defines the order in\n # > which the inserted strings appear in the resulting text.\n # So we sort by start position. But if multiple text edits start at the same position,\n # we use the index in the array as the key.\n\n return list(map(lambda pair: pair[1], sorted(enumerate(changes), key=get_start_position, reverse=True)))\n", "path": "plugin/core/edit.py"}]}
1,917
636
gh_patches_debug_1674
rasdani/github-patches
git_diff
conan-io__conan-4324
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tools.environment_append raises if tries to unset variable which was never set after #4224, I may use the following code, for instance, to ensure variable is not set: ``` with environment_append({'CONAN_BASH_PATH': None}): pass ``` however, it raises if `CONAN_BASH_PATH` is not set (prior to the environment_append invocation): ``` Traceback (most recent call last): File "C:\bincrafters\conan\conans\test\unittests\client\tools\os_info\osinfo_test.py", line 39, in test_windows with environment_append(new_env): File "c:\users\sse4\appdata\local\programs\python\python36\lib\contextlib.py", line 81, in __enter__ return next(self.gen) File "C:\bincrafters\conan\conans\client\tools\env.py", line 57, in environment_append os.environ.pop(var) File "c:\users\sse4\appdata\local\programs\python\python36\lib\_collections_abc.py", line 795, in pop value = self[key] File "c:\users\sse4\appdata\local\programs\python\python36\lib\os.py", line 669, in __getitem__ raise KeyError(key) from None KeyError: 'CONAN_BASH_PATH' ``` I would expect `tools.environment_append` to be no op in such case, otherwise, it requires additional logic to workaround this behavior. To help us debug your issue please explain: - [ ] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md). - [ ] I've specified the Conan version, operating system version and any tool that can be relevant. - [ ] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion. </issue> <code> [start of conans/client/tools/env.py] 1 import os 2 import sys 3 from contextlib import contextmanager 4 5 from conans.client.run_environment import RunEnvironment 6 from conans.client.tools.files import _path_equals, which 7 from conans.errors import ConanException 8 9 10 @contextmanager 11 def pythonpath(conanfile): 12 python_path = conanfile.env.get("PYTHONPATH", None) 13 if python_path: 14 old_path = sys.path[:] 15 if isinstance(python_path, list): 16 sys.path.extend(python_path) 17 else: 18 sys.path.append(python_path) 19 20 yield 21 sys.path = old_path 22 else: 23 yield 24 25 26 @contextmanager 27 def run_environment(conanfile): 28 with environment_append(RunEnvironment(conanfile).vars): 29 yield 30 31 32 @contextmanager 33 def environment_append(env_vars): 34 """ 35 :param env_vars: List (dict) of simple environment vars. {name: value, name2: value2} => e.g.: MYVAR=1 36 The values can also be lists of appendable environment vars. {name: [value, value2]} 37 => e.g. PATH=/path/1:/path/2 38 If the value is set to None, then that environment variable is unset. 39 :return: None 40 """ 41 unset_vars = [] 42 for key in env_vars.keys(): 43 if env_vars[key] is None: 44 unset_vars.append(key) 45 for var in unset_vars: 46 env_vars.pop(var, None) 47 for name, value in env_vars.items(): 48 if isinstance(value, list): 49 env_vars[name] = os.pathsep.join(value) 50 old = os.environ.get(name) 51 if old: 52 env_vars[name] += os.pathsep + old 53 if env_vars or unset_vars: 54 old_env = dict(os.environ) 55 os.environ.update(env_vars) 56 for var in unset_vars: 57 os.environ.pop(var) 58 try: 59 yield 60 finally: 61 os.environ.clear() 62 os.environ.update(old_env) 63 else: 64 yield 65 66 67 @contextmanager 68 def no_op(): 69 yield 70 71 72 @contextmanager 73 def remove_from_path(command): 74 curpath = os.getenv("PATH") 75 first_it = True 76 for _ in range(30): 77 if not first_it: 78 with environment_append({"PATH": curpath}): 79 the_command = which(command) 80 else: 81 the_command = which(command) 82 first_it = False 83 84 if not the_command: 85 break 86 new_path = [] 87 for entry in curpath.split(os.pathsep): 88 if not _path_equals(entry, os.path.dirname(the_command)): 89 new_path.append(entry) 90 91 curpath = os.pathsep.join(new_path) 92 else: 93 raise ConanException("Error in tools.remove_from_path!! couldn't remove the tool '%s' " 94 "from the path after 30 attempts, still found in '%s' this is a " 95 "Conan client bug, please open an issue at: " 96 "https://github.com/conan-io/conan\n\nPATH=%s" 97 % (command, the_command, os.getenv("PATH"))) 98 99 with environment_append({"PATH": curpath}): 100 yield 101 [end of conans/client/tools/env.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conans/client/tools/env.py b/conans/client/tools/env.py --- a/conans/client/tools/env.py +++ b/conans/client/tools/env.py @@ -54,7 +54,7 @@ old_env = dict(os.environ) os.environ.update(env_vars) for var in unset_vars: - os.environ.pop(var) + os.environ.pop(var, None) try: yield finally:
{"golden_diff": "diff --git a/conans/client/tools/env.py b/conans/client/tools/env.py\n--- a/conans/client/tools/env.py\n+++ b/conans/client/tools/env.py\n@@ -54,7 +54,7 @@\n old_env = dict(os.environ)\n os.environ.update(env_vars)\n for var in unset_vars:\n- os.environ.pop(var)\n+ os.environ.pop(var, None)\n try:\n yield\n finally:\n", "issue": "tools.environment_append raises if tries to unset variable which was never set\nafter #4224, I may use the following code, for instance, to ensure variable is not set:\r\n```\r\nwith environment_append({'CONAN_BASH_PATH': None}):\r\n pass\r\n```\r\nhowever, it raises if `CONAN_BASH_PATH` is not set (prior to the environment_append invocation):\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\bincrafters\\conan\\conans\\test\\unittests\\client\\tools\\os_info\\osinfo_test.py\", line 39, in test_windows\r\n with environment_append(new_env):\r\n File \"c:\\users\\sse4\\appdata\\local\\programs\\python\\python36\\lib\\contextlib.py\", line 81, in __enter__\r\n return next(self.gen)\r\n File \"C:\\bincrafters\\conan\\conans\\client\\tools\\env.py\", line 57, in environment_append\r\n os.environ.pop(var)\r\n File \"c:\\users\\sse4\\appdata\\local\\programs\\python\\python36\\lib\\_collections_abc.py\", line 795, in pop\r\n value = self[key]\r\n File \"c:\\users\\sse4\\appdata\\local\\programs\\python\\python36\\lib\\os.py\", line 669, in __getitem__\r\n raise KeyError(key) from None\r\nKeyError: 'CONAN_BASH_PATH'\r\n```\r\nI would expect `tools.environment_append` to be no op in such case, otherwise, it requires additional logic to workaround this behavior.\r\n\r\nTo help us debug your issue please explain:\r\n\r\n- [ ] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).\r\n- [ ] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [ ] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nfrom contextlib import contextmanager\n\nfrom conans.client.run_environment import RunEnvironment\nfrom conans.client.tools.files import _path_equals, which\nfrom conans.errors import ConanException\n\n\n@contextmanager\ndef pythonpath(conanfile):\n python_path = conanfile.env.get(\"PYTHONPATH\", None)\n if python_path:\n old_path = sys.path[:]\n if isinstance(python_path, list):\n sys.path.extend(python_path)\n else:\n sys.path.append(python_path)\n\n yield\n sys.path = old_path\n else:\n yield\n\n\n@contextmanager\ndef run_environment(conanfile):\n with environment_append(RunEnvironment(conanfile).vars):\n yield\n\n\n@contextmanager\ndef environment_append(env_vars):\n \"\"\"\n :param env_vars: List (dict) of simple environment vars. {name: value, name2: value2} => e.g.: MYVAR=1\n The values can also be lists of appendable environment vars. {name: [value, value2]}\n => e.g. PATH=/path/1:/path/2\n If the value is set to None, then that environment variable is unset.\n :return: None\n \"\"\"\n unset_vars = []\n for key in env_vars.keys():\n if env_vars[key] is None:\n unset_vars.append(key)\n for var in unset_vars:\n env_vars.pop(var, None)\n for name, value in env_vars.items():\n if isinstance(value, list):\n env_vars[name] = os.pathsep.join(value)\n old = os.environ.get(name)\n if old:\n env_vars[name] += os.pathsep + old\n if env_vars or unset_vars:\n old_env = dict(os.environ)\n os.environ.update(env_vars)\n for var in unset_vars:\n os.environ.pop(var)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n else:\n yield\n\n\n@contextmanager\ndef no_op():\n yield\n\n\n@contextmanager\ndef remove_from_path(command):\n curpath = os.getenv(\"PATH\")\n first_it = True\n for _ in range(30):\n if not first_it:\n with environment_append({\"PATH\": curpath}):\n the_command = which(command)\n else:\n the_command = which(command)\n first_it = False\n\n if not the_command:\n break\n new_path = []\n for entry in curpath.split(os.pathsep):\n if not _path_equals(entry, os.path.dirname(the_command)):\n new_path.append(entry)\n\n curpath = os.pathsep.join(new_path)\n else:\n raise ConanException(\"Error in tools.remove_from_path!! couldn't remove the tool '%s' \"\n \"from the path after 30 attempts, still found in '%s' this is a \"\n \"Conan client bug, please open an issue at: \"\n \"https://github.com/conan-io/conan\\n\\nPATH=%s\"\n % (command, the_command, os.getenv(\"PATH\")))\n\n with environment_append({\"PATH\": curpath}):\n yield\n", "path": "conans/client/tools/env.py"}]}
1,854
96
gh_patches_debug_3685
rasdani/github-patches
git_diff
praw-dev__praw-888
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ListingGenerator gets stuck in loop when 'before' parameter is supplied. ## Issue Description When retrieving submissions and supplying the 'before' parameter, the ListingGenerator gets stuck in a loop where the same 'after' parameter is submitted to reddit infinitely. I'm submitting a pull request with a fix. Additional details can be found in the pull request. Example: ``reddit.subreddit(subreddit).new(limit=1024,params={'before': 't3_7xxxxx', 'count': 1024 })`` ## System Information PRAW Version: 5.3.0 Python Version: 3.5.3 Operating System: Debian 9.3 </issue> <code> [start of praw/models/listing/generator.py] 1 """Provide the ListingGenerator class.""" 2 from copy import deepcopy 3 4 from .listing import FlairListing 5 from ..base import PRAWBase 6 7 8 class ListingGenerator(PRAWBase): 9 """Instances of this class generate :class:`.RedditBase` instances. 10 11 .. warning:: This class should not be directly utilized. Instead you will 12 find a number of methods that return instances of the class: 13 14 http://praw.readthedocs.io/en/latest/search.html?q=ListingGenerator 15 16 """ 17 18 def __init__(self, reddit, url, limit=100, params=None): 19 """Initialize a ListingGenerator instance. 20 21 :param reddit: An instance of :class:`.Reddit`. 22 :param url: A URL returning a reddit listing. 23 :param limit: The number of content entries to fetch. If ``limit`` is 24 None, then fetch as many entries as possible. Most of reddit's 25 listings contain a maximum of 1000 items, and are returned 100 at a 26 time. This class will automatically issue all necessary 27 requests (default: 100). 28 :param params: A dictionary containing additional query string 29 parameters to send with the request. 30 31 """ 32 super(ListingGenerator, self).__init__(reddit, None) 33 self._exhausted = False 34 self._listing = None 35 self._list_index = None 36 self.limit = limit 37 self.params = deepcopy(params) if params else {} 38 self.params['limit'] = limit or 1024 39 self.url = url 40 self.yielded = 0 41 42 def __iter__(self): 43 """Permit ListingGenerator to operate as an iterator.""" 44 return self 45 46 def __next__(self): 47 """Permit ListingGenerator to operate as a generator in py3.""" 48 if self.limit is not None and self.yielded >= self.limit: 49 raise StopIteration() 50 51 if self._listing is None or self._list_index >= len(self._listing): 52 self._next_batch() 53 54 self._list_index += 1 55 self.yielded += 1 56 return self._listing[self._list_index - 1] 57 58 def _next_batch(self): 59 if self._exhausted: 60 raise StopIteration() 61 62 self._listing = self._reddit.get(self.url, params=self.params) 63 if isinstance(self._listing, list): 64 self._listing = self._listing[1] # for submission duplicates 65 elif isinstance(self._listing, dict): 66 self._listing = FlairListing(self._reddit, self._listing) 67 self._list_index = 0 68 69 if not self._listing: 70 raise StopIteration() 71 72 if self._listing.after: 73 self.params['after'] = self._listing.after 74 else: 75 self._exhausted = True 76 77 def next(self): 78 """Permit ListingGenerator to operate as a generator in py2.""" 79 return self.__next__() 80 [end of praw/models/listing/generator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/praw/models/listing/generator.py b/praw/models/listing/generator.py --- a/praw/models/listing/generator.py +++ b/praw/models/listing/generator.py @@ -69,7 +69,8 @@ if not self._listing: raise StopIteration() - if self._listing.after: + if (self._listing.after and + self._listing.after != self.params.get('after')): self.params['after'] = self._listing.after else: self._exhausted = True
{"golden_diff": "diff --git a/praw/models/listing/generator.py b/praw/models/listing/generator.py\n--- a/praw/models/listing/generator.py\n+++ b/praw/models/listing/generator.py\n@@ -69,7 +69,8 @@\n if not self._listing:\n raise StopIteration()\n \n- if self._listing.after:\n+ if (self._listing.after and\n+ self._listing.after != self.params.get('after')):\n self.params['after'] = self._listing.after\n else:\n self._exhausted = True\n", "issue": "ListingGenerator gets stuck in loop when 'before' parameter is supplied.\n## Issue Description\r\n\r\nWhen retrieving submissions and supplying the 'before' parameter, the ListingGenerator gets stuck in a loop where the same 'after' parameter is submitted to reddit infinitely. I'm submitting a pull request with a fix. Additional details can be found in the pull request.\r\n\r\nExample:\r\n``reddit.subreddit(subreddit).new(limit=1024,params={'before': 't3_7xxxxx', 'count': 1024 })``\r\n\r\n## System Information\r\n\r\n PRAW Version: 5.3.0\r\n Python Version: 3.5.3\r\nOperating System: Debian 9.3\r\n\n", "before_files": [{"content": "\"\"\"Provide the ListingGenerator class.\"\"\"\nfrom copy import deepcopy\n\nfrom .listing import FlairListing\nfrom ..base import PRAWBase\n\n\nclass ListingGenerator(PRAWBase):\n \"\"\"Instances of this class generate :class:`.RedditBase` instances.\n\n .. warning:: This class should not be directly utilized. Instead you will\n find a number of methods that return instances of the class:\n\n http://praw.readthedocs.io/en/latest/search.html?q=ListingGenerator\n\n \"\"\"\n\n def __init__(self, reddit, url, limit=100, params=None):\n \"\"\"Initialize a ListingGenerator instance.\n\n :param reddit: An instance of :class:`.Reddit`.\n :param url: A URL returning a reddit listing.\n :param limit: The number of content entries to fetch. If ``limit`` is\n None, then fetch as many entries as possible. Most of reddit's\n listings contain a maximum of 1000 items, and are returned 100 at a\n time. This class will automatically issue all necessary\n requests (default: 100).\n :param params: A dictionary containing additional query string\n parameters to send with the request.\n\n \"\"\"\n super(ListingGenerator, self).__init__(reddit, None)\n self._exhausted = False\n self._listing = None\n self._list_index = None\n self.limit = limit\n self.params = deepcopy(params) if params else {}\n self.params['limit'] = limit or 1024\n self.url = url\n self.yielded = 0\n\n def __iter__(self):\n \"\"\"Permit ListingGenerator to operate as an iterator.\"\"\"\n return self\n\n def __next__(self):\n \"\"\"Permit ListingGenerator to operate as a generator in py3.\"\"\"\n if self.limit is not None and self.yielded >= self.limit:\n raise StopIteration()\n\n if self._listing is None or self._list_index >= len(self._listing):\n self._next_batch()\n\n self._list_index += 1\n self.yielded += 1\n return self._listing[self._list_index - 1]\n\n def _next_batch(self):\n if self._exhausted:\n raise StopIteration()\n\n self._listing = self._reddit.get(self.url, params=self.params)\n if isinstance(self._listing, list):\n self._listing = self._listing[1] # for submission duplicates\n elif isinstance(self._listing, dict):\n self._listing = FlairListing(self._reddit, self._listing)\n self._list_index = 0\n\n if not self._listing:\n raise StopIteration()\n\n if self._listing.after:\n self.params['after'] = self._listing.after\n else:\n self._exhausted = True\n\n def next(self):\n \"\"\"Permit ListingGenerator to operate as a generator in py2.\"\"\"\n return self.__next__()\n", "path": "praw/models/listing/generator.py"}]}
1,483
125
gh_patches_debug_17194
rasdani/github-patches
git_diff
vacanza__python-holidays-1592
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Regression in holidays in the Netherlands I noticed the change Update Netherlands holidays: add holiday categories #1552 broke my unit tests as it no longer considers Liberation day as a holiday on the 5yr interval even though it is a holiday for most people in the Netherlands every 5 years. On Liberation day (Bevrijdingsdag) the majority of employees have a day off every 5 years (2005, 2010, 2015, etc.). This was the previous behaviour which worked as expected. Now the 5-year rule of Liberation day is combined with Good Friday in the OPTIONAL category. This equates the status of Liberation day with Good Friday, but this is not logical. Good Fridays is more similar to Liberation Day on the non 5yr-years as those are still a bank holiday but fewer people have a guaranteed holiday than in the 5-yr years. There is no option to add the non-5yr holidays it seems. The behaviour I would expect is: - PUBLIC includes 5yr Liberation days - OPTIONAL includes Good Friday and the non-5yr liberation days </issue> <code> [start of holidays/countries/netherlands.py] 1 # python-holidays 2 # --------------- 3 # A fast, efficient Python library for generating country, province and state 4 # specific sets of holidays on the fly. It aims to make determining whether a 5 # specific date is a holiday as fast and flexible as possible. 6 # 7 # Authors: dr-prodigy <[email protected]> (c) 2017-2023 8 # ryanss <[email protected]> (c) 2014-2017 9 # Website: https://github.com/dr-prodigy/python-holidays 10 # License: MIT (see LICENSE file) 11 12 from datetime import date 13 from datetime import timedelta as td 14 from gettext import gettext as tr 15 16 from holidays.calendars.gregorian import APR, AUG 17 from holidays.constants import OPTIONAL, PUBLIC 18 from holidays.groups import ChristianHolidays, InternationalHolidays 19 from holidays.holiday_base import HolidayBase 20 21 22 class Netherlands(HolidayBase, ChristianHolidays, InternationalHolidays): 23 """ 24 References: 25 26 - https://en.wikipedia.org/wiki/Public_holidays_in_the_Netherlands 27 - https://nl.wikipedia.org/wiki/Feestdagen_in_Nederland 28 - http://www.iamsterdam.com/en/plan-your-trip/practical-info/public-holidays 29 """ 30 31 country = "NL" 32 default_language = "nl" 33 supported_categories = (OPTIONAL, PUBLIC) 34 supported_languages = ("en_US", "nl", "uk") 35 36 def __init__(self, *args, **kwargs): 37 ChristianHolidays.__init__(self) 38 InternationalHolidays.__init__(self) 39 super().__init__(*args, **kwargs) 40 41 def _populate_public_holidays(self): 42 # New Year's Day. 43 self._add_new_years_day(tr("Nieuwjaarsdag")) 44 45 # Easter Sunday. 46 self._add_easter_sunday(tr("Eerste paasdag")) 47 48 # Easter Monday. 49 self._add_easter_monday(tr("Tweede paasdag")) 50 51 # King's / Queen's day 52 if self._year >= 1891: 53 name = ( 54 # King's Day. 55 tr("Koningsdag") 56 if self._year >= 2014 57 # Queen's Day. 58 else tr("Koninginnedag") 59 ) 60 if self._year >= 2014: 61 dt = date(self._year, APR, 27) 62 elif self._year >= 1949: 63 dt = date(self._year, APR, 30) 64 else: 65 dt = date(self._year, AUG, 31) 66 if self._is_sunday(dt): 67 dt += td(days=-1) if self._year >= 1980 else td(days=+1) 68 self._add_holiday(name, dt) 69 70 # Ascension Day. 71 self._add_ascension_thursday(tr("Hemelvaartsdag")) 72 73 # Whit Sunday. 74 self._add_whit_sunday(tr("Eerste Pinksterdag")) 75 76 # Whit Monday. 77 self._add_whit_monday(tr("Tweede Pinksterdag")) 78 79 # Christmas Day. 80 self._add_christmas_day(tr("Eerste Kerstdag")) 81 82 # Second Day of Christmas. 83 self._add_christmas_day_two(tr("Tweede Kerstdag")) 84 85 def _populate_optional_holidays(self): 86 # Good Friday. 87 self._add_good_friday(tr("Goede Vrijdag")) 88 89 if (self._year >= 1945 and self._year % 5 == 0) or self._year >= 1990: 90 # Liberation Day. 91 self._add_holiday_may_5(tr("Bevrijdingsdag")) 92 93 94 class NL(Netherlands): 95 pass 96 97 98 class NLD(Netherlands): 99 pass 100 [end of holidays/countries/netherlands.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/holidays/countries/netherlands.py b/holidays/countries/netherlands.py --- a/holidays/countries/netherlands.py +++ b/holidays/countries/netherlands.py @@ -67,6 +67,10 @@ dt += td(days=-1) if self._year >= 1980 else td(days=+1) self._add_holiday(name, dt) + if self._year >= 1950 and self._year % 5 == 0: + # Liberation Day. + self._add_holiday_may_5(tr("Bevrijdingsdag")) + # Ascension Day. self._add_ascension_thursday(tr("Hemelvaartsdag")) @@ -86,7 +90,7 @@ # Good Friday. self._add_good_friday(tr("Goede Vrijdag")) - if (self._year >= 1945 and self._year % 5 == 0) or self._year >= 1990: + if self._year >= 1990: # Liberation Day. self._add_holiday_may_5(tr("Bevrijdingsdag"))
{"golden_diff": "diff --git a/holidays/countries/netherlands.py b/holidays/countries/netherlands.py\n--- a/holidays/countries/netherlands.py\n+++ b/holidays/countries/netherlands.py\n@@ -67,6 +67,10 @@\n dt += td(days=-1) if self._year >= 1980 else td(days=+1)\n self._add_holiday(name, dt)\n \n+ if self._year >= 1950 and self._year % 5 == 0:\n+ # Liberation Day.\n+ self._add_holiday_may_5(tr(\"Bevrijdingsdag\"))\n+\n # Ascension Day.\n self._add_ascension_thursday(tr(\"Hemelvaartsdag\"))\n \n@@ -86,7 +90,7 @@\n # Good Friday.\n self._add_good_friday(tr(\"Goede Vrijdag\"))\n \n- if (self._year >= 1945 and self._year % 5 == 0) or self._year >= 1990:\n+ if self._year >= 1990:\n # Liberation Day.\n self._add_holiday_may_5(tr(\"Bevrijdingsdag\"))\n", "issue": "Regression in holidays in the Netherlands\nI noticed the change Update Netherlands holidays: add holiday categories #1552 broke my unit tests as it no longer considers Liberation day as a holiday on the 5yr interval even though it is a holiday for most people in the Netherlands every 5 years.\r\n\r\nOn Liberation day (Bevrijdingsdag) the majority of employees have a day off every 5 years (2005, 2010, 2015, etc.). This was the previous behaviour which worked as expected.\r\n\r\nNow the 5-year rule of Liberation day is combined with Good Friday in the OPTIONAL category. This equates the status of Liberation day with Good Friday, but this is not logical. Good Fridays is more similar to Liberation Day on the non 5yr-years as those are still a bank holiday but fewer people have a guaranteed holiday than in the 5-yr years. There is no option to add the non-5yr holidays it seems.\r\n\r\nThe behaviour I would expect is:\r\n- PUBLIC includes 5yr Liberation days\r\n- OPTIONAL includes Good Friday and the non-5yr liberation days\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\nfrom datetime import timedelta as td\nfrom gettext import gettext as tr\n\nfrom holidays.calendars.gregorian import APR, AUG\nfrom holidays.constants import OPTIONAL, PUBLIC\nfrom holidays.groups import ChristianHolidays, InternationalHolidays\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Netherlands(HolidayBase, ChristianHolidays, InternationalHolidays):\n \"\"\"\n References:\n\n - https://en.wikipedia.org/wiki/Public_holidays_in_the_Netherlands\n - https://nl.wikipedia.org/wiki/Feestdagen_in_Nederland\n - http://www.iamsterdam.com/en/plan-your-trip/practical-info/public-holidays\n \"\"\"\n\n country = \"NL\"\n default_language = \"nl\"\n supported_categories = (OPTIONAL, PUBLIC)\n supported_languages = (\"en_US\", \"nl\", \"uk\")\n\n def __init__(self, *args, **kwargs):\n ChristianHolidays.__init__(self)\n InternationalHolidays.__init__(self)\n super().__init__(*args, **kwargs)\n\n def _populate_public_holidays(self):\n # New Year's Day.\n self._add_new_years_day(tr(\"Nieuwjaarsdag\"))\n\n # Easter Sunday.\n self._add_easter_sunday(tr(\"Eerste paasdag\"))\n\n # Easter Monday.\n self._add_easter_monday(tr(\"Tweede paasdag\"))\n\n # King's / Queen's day\n if self._year >= 1891:\n name = (\n # King's Day.\n tr(\"Koningsdag\")\n if self._year >= 2014\n # Queen's Day.\n else tr(\"Koninginnedag\")\n )\n if self._year >= 2014:\n dt = date(self._year, APR, 27)\n elif self._year >= 1949:\n dt = date(self._year, APR, 30)\n else:\n dt = date(self._year, AUG, 31)\n if self._is_sunday(dt):\n dt += td(days=-1) if self._year >= 1980 else td(days=+1)\n self._add_holiday(name, dt)\n\n # Ascension Day.\n self._add_ascension_thursday(tr(\"Hemelvaartsdag\"))\n\n # Whit Sunday.\n self._add_whit_sunday(tr(\"Eerste Pinksterdag\"))\n\n # Whit Monday.\n self._add_whit_monday(tr(\"Tweede Pinksterdag\"))\n\n # Christmas Day.\n self._add_christmas_day(tr(\"Eerste Kerstdag\"))\n\n # Second Day of Christmas.\n self._add_christmas_day_two(tr(\"Tweede Kerstdag\"))\n\n def _populate_optional_holidays(self):\n # Good Friday.\n self._add_good_friday(tr(\"Goede Vrijdag\"))\n\n if (self._year >= 1945 and self._year % 5 == 0) or self._year >= 1990:\n # Liberation Day.\n self._add_holiday_may_5(tr(\"Bevrijdingsdag\"))\n\n\nclass NL(Netherlands):\n pass\n\n\nclass NLD(Netherlands):\n pass\n", "path": "holidays/countries/netherlands.py"}]}
1,816
267
gh_patches_debug_6755
rasdani/github-patches
git_diff
wagtail__wagtail-3277
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Elasticsearch backend indexes draft updates of existing pages ### Issue Summary When saving a draft version of an existing live page the Elasticsearch backend reindexes the page with the draft content. The reindexed content will potentially cause frontend search results to include the page if the search query matches the draft content. I'm using the following search query in my view: search_results = Page.objects.live().search(search_query) New content that is saved as draft is not an issue since the live() filter excludes it. ### Steps to Reproduce 1. Edit an indexed field of an existing published page 2. Insert a unique term in the indexed field 3. Click 'Save Draft' 4. On the fontend search for the unique term. 5. The editted page will be returned in the results I can see the value of having the draft content indexed on the Wagtail backend but since the frontend shares the same index, that can be a problem. ### Technical details * Python version: 3.5.2. * Django version: 1.10.4. * Wagtail version: 1.8. * Elasticsearch: 5 </issue> <code> [start of wagtail/wagtailsearch/signal_handlers.py] 1 from __future__ import absolute_import, unicode_literals 2 3 from django.db.models.signals import post_delete, post_save 4 5 from wagtail.wagtailsearch import index 6 7 8 def post_save_signal_handler(instance, **kwargs): 9 index.insert_or_update_object(instance) 10 11 12 def post_delete_signal_handler(instance, **kwargs): 13 index.remove_object(instance) 14 15 16 def register_signal_handlers(): 17 # Loop through list and register signal handlers for each one 18 for model in index.get_indexed_models(): 19 post_save.connect(post_save_signal_handler, sender=model) 20 post_delete.connect(post_delete_signal_handler, sender=model) 21 [end of wagtail/wagtailsearch/signal_handlers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailsearch/signal_handlers.py b/wagtail/wagtailsearch/signal_handlers.py --- a/wagtail/wagtailsearch/signal_handlers.py +++ b/wagtail/wagtailsearch/signal_handlers.py @@ -5,7 +5,13 @@ from wagtail.wagtailsearch import index -def post_save_signal_handler(instance, **kwargs): +def post_save_signal_handler(instance, update_fields=None, **kwargs): + if update_fields is not None: + # fetch a fresh copy of instance from the database to ensure + # that we're not indexing any of the unsaved data contained in + # the fields that were not passed in update_fields + instance = type(instance).objects.get(pk=instance.pk) + index.insert_or_update_object(instance)
{"golden_diff": "diff --git a/wagtail/wagtailsearch/signal_handlers.py b/wagtail/wagtailsearch/signal_handlers.py\n--- a/wagtail/wagtailsearch/signal_handlers.py\n+++ b/wagtail/wagtailsearch/signal_handlers.py\n@@ -5,7 +5,13 @@\n from wagtail.wagtailsearch import index\n \n \n-def post_save_signal_handler(instance, **kwargs):\n+def post_save_signal_handler(instance, update_fields=None, **kwargs):\n+ if update_fields is not None:\n+ # fetch a fresh copy of instance from the database to ensure\n+ # that we're not indexing any of the unsaved data contained in\n+ # the fields that were not passed in update_fields\n+ instance = type(instance).objects.get(pk=instance.pk)\n+\n index.insert_or_update_object(instance)\n", "issue": "Elasticsearch backend indexes draft updates of existing pages\n### Issue Summary\r\n\r\nWhen saving a draft version of an existing live page the Elasticsearch backend reindexes the page with the draft content. The reindexed content will potentially cause frontend search results to include the page if the search query matches the draft content.\r\n\r\nI'm using the following search query in my view:\r\n\r\n search_results = Page.objects.live().search(search_query)\r\n\r\nNew content that is saved as draft is not an issue since the live() filter excludes it.\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Edit an indexed field of an existing published page\r\n2. Insert a unique term in the indexed field\r\n3. Click 'Save Draft'\r\n4. On the fontend search for the unique term.\r\n5. The editted page will be returned in the results\r\n\r\nI can see the value of having the draft content indexed on the Wagtail backend but since the frontend shares the same index, that can be a problem.\r\n\r\n### Technical details\r\n\r\n* Python version: 3.5.2.\r\n* Django version: 1.10.4.\r\n* Wagtail version: 1.8.\r\n* Elasticsearch: 5\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.db.models.signals import post_delete, post_save\n\nfrom wagtail.wagtailsearch import index\n\n\ndef post_save_signal_handler(instance, **kwargs):\n index.insert_or_update_object(instance)\n\n\ndef post_delete_signal_handler(instance, **kwargs):\n index.remove_object(instance)\n\n\ndef register_signal_handlers():\n # Loop through list and register signal handlers for each one\n for model in index.get_indexed_models():\n post_save.connect(post_save_signal_handler, sender=model)\n post_delete.connect(post_delete_signal_handler, sender=model)\n", "path": "wagtail/wagtailsearch/signal_handlers.py"}]}
945
181
gh_patches_debug_23357
rasdani/github-patches
git_diff
iterative__dvc-1076
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: dvc remote remove leads to Initialization error / config file error. **Setup** - dvc version 0.18.9, installed with pip, python Anaconda 3.6.4, Ubuntu 16.04 **Repro** ```bash set -e set -x rm -rf foo mkdir -p foo && cd foo git init && dvc init echo bar > bar.txt dvc remote add -d dummy s3://dummy dvc remote remove dummy dvc add bar.txt ``` ```bash + rm -rf foo + mkdir -p foo + cd foo + git init Initialized empty Git repository in /home/tmain/foo/.git/ + dvc init Adding '.dvc/state' to '.dvc/.gitignore'. Adding '.dvc/state.lock' to '.dvc/.gitignore'. Adding '.dvc/link.state' to '.dvc/.gitignore'. Adding '.dvc/link.state.lock' to '.dvc/.gitignore'. Adding '.dvc/lock' to '.dvc/.gitignore'. Adding '.dvc/config.local' to '.dvc/.gitignore'. Adding '.dvc/updater' to '.dvc/.gitignore'. Adding '.dvc/cache' to '.dvc/.gitignore'. Checking for updates... + echo bar + dvc remote add -d dummy s3://dummy Setting 'dummy' as a default remote. + dvc remote remove dummy + dvc add bar.txt Error: Initialization error: Config file error: Can't find remote section 'remote "dummy"' in config ``` </issue> <code> [start of dvc/command/remote.py] 1 import re 2 3 from dvc.config import Config 4 from dvc.command.config import CmdConfig 5 from dvc.logger import Logger 6 7 8 class CmdRemoteAdd(CmdConfig): 9 def run(self): 10 section = Config.SECTION_REMOTE_FMT.format(self.args.name) 11 ret = self.set(section, Config.SECTION_REMOTE_URL, self.args.url) 12 if ret != 0: 13 return ret 14 15 if self.args.default: 16 msg = 'Setting \'{}\' as a default remote.'.format(self.args.name) 17 Logger.info(msg) 18 ret = self.set(Config.SECTION_CORE, 19 Config.SECTION_CORE_REMOTE, 20 self.args.name) 21 22 return ret 23 24 25 class CmdRemoteRemove(CmdConfig): 26 def run(self): 27 section = Config.SECTION_REMOTE_FMT.format(self.args.name) 28 return self.unset(section, Config.SECTION_REMOTE_URL) 29 30 31 class CmdRemoteModify(CmdConfig): 32 def run(self): 33 section = Config.SECTION_REMOTE_FMT.format(self.args.name) 34 self.args.name = '{}.{}'.format(section, self.args.option) 35 return super(CmdRemoteModify, self).run() 36 37 38 class CmdRemoteList(CmdConfig): 39 def run(self): 40 for section in self.configobj.keys(): 41 r = re.match(Config.SECTION_REMOTE_REGEX, section) 42 if r: 43 name = r.group('name') 44 url = self.configobj[section].get(Config.SECTION_REMOTE_URL, 45 '') 46 Logger.info('{}\t{}'.format(name, url)) 47 return 0 48 [end of dvc/command/remote.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/command/remote.py b/dvc/command/remote.py --- a/dvc/command/remote.py +++ b/dvc/command/remote.py @@ -1,4 +1,6 @@ +import os import re +import configobj from dvc.config import Config from dvc.command.config import CmdConfig @@ -23,9 +25,35 @@ class CmdRemoteRemove(CmdConfig): + def _remove_default(self, config_file, remote): + path = os.path.join(os.path.dirname(self.config_file), + config_file) + config = configobj.ConfigObj(path) + + core = config.get(Config.SECTION_CORE, None) + if core is None: + return + + default = core.get(Config.SECTION_CORE_REMOTE, None) + if default is None: + return + + if default == remote: + del config[Config.SECTION_CORE][Config.SECTION_CORE_REMOTE] + if len(config[Config.SECTION_CORE]) == 0: + del config[Config.SECTION_CORE] + + config.write() + def run(self): section = Config.SECTION_REMOTE_FMT.format(self.args.name) - return self.unset(section, Config.SECTION_REMOTE_URL) + ret = self.unset(section) + if ret != 0: + return ret + + self._remove_default(Config.CONFIG, self.args.name) + self._remove_default(Config.CONFIG_LOCAL, self.args.name) + return 0 class CmdRemoteModify(CmdConfig):
{"golden_diff": "diff --git a/dvc/command/remote.py b/dvc/command/remote.py\n--- a/dvc/command/remote.py\n+++ b/dvc/command/remote.py\n@@ -1,4 +1,6 @@\n+import os\n import re\n+import configobj\n \n from dvc.config import Config\n from dvc.command.config import CmdConfig\n@@ -23,9 +25,35 @@\n \n \n class CmdRemoteRemove(CmdConfig):\n+ def _remove_default(self, config_file, remote):\n+ path = os.path.join(os.path.dirname(self.config_file),\n+ config_file)\n+ config = configobj.ConfigObj(path)\n+\n+ core = config.get(Config.SECTION_CORE, None)\n+ if core is None:\n+ return\n+\n+ default = core.get(Config.SECTION_CORE_REMOTE, None)\n+ if default is None:\n+ return\n+\n+ if default == remote:\n+ del config[Config.SECTION_CORE][Config.SECTION_CORE_REMOTE]\n+ if len(config[Config.SECTION_CORE]) == 0:\n+ del config[Config.SECTION_CORE]\n+\n+ config.write()\n+\n def run(self):\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n- return self.unset(section, Config.SECTION_REMOTE_URL)\n+ ret = self.unset(section)\n+ if ret != 0:\n+ return ret\n+\n+ self._remove_default(Config.CONFIG, self.args.name)\n+ self._remove_default(Config.CONFIG_LOCAL, self.args.name)\n+ return 0\n \n \n class CmdRemoteModify(CmdConfig):\n", "issue": "Bug: dvc remote remove leads to Initialization error / config file error.\n**Setup**\r\n\r\n- dvc version 0.18.9, installed with pip, python Anaconda 3.6.4, Ubuntu 16.04\r\n\r\n**Repro**\r\n\r\n```bash\r\nset -e\r\nset -x\r\nrm -rf foo\r\nmkdir -p foo && cd foo\r\ngit init && dvc init\r\necho bar > bar.txt\r\ndvc remote add -d dummy s3://dummy\r\ndvc remote remove dummy\r\ndvc add bar.txt\r\n```\r\n\r\n```bash\r\n+ rm -rf foo\r\n+ mkdir -p foo\r\n+ cd foo\r\n+ git init\r\nInitialized empty Git repository in /home/tmain/foo/.git/\r\n+ dvc init\r\nAdding '.dvc/state' to '.dvc/.gitignore'.\r\nAdding '.dvc/state.lock' to '.dvc/.gitignore'.\r\nAdding '.dvc/link.state' to '.dvc/.gitignore'.\r\nAdding '.dvc/link.state.lock' to '.dvc/.gitignore'.\r\nAdding '.dvc/lock' to '.dvc/.gitignore'.\r\nAdding '.dvc/config.local' to '.dvc/.gitignore'.\r\nAdding '.dvc/updater' to '.dvc/.gitignore'.\r\nAdding '.dvc/cache' to '.dvc/.gitignore'.\r\nChecking for updates...\r\n+ echo bar\r\n+ dvc remote add -d dummy s3://dummy\r\nSetting 'dummy' as a default remote.\r\n+ dvc remote remove dummy\r\n+ dvc add bar.txt\r\nError: Initialization error: Config file error: Can't find remote section 'remote \"dummy\"' in config\r\n\r\n```\n", "before_files": [{"content": "import re\n\nfrom dvc.config import Config\nfrom dvc.command.config import CmdConfig\nfrom dvc.logger import Logger\n\n\nclass CmdRemoteAdd(CmdConfig):\n def run(self):\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n ret = self.set(section, Config.SECTION_REMOTE_URL, self.args.url)\n if ret != 0:\n return ret\n\n if self.args.default:\n msg = 'Setting \\'{}\\' as a default remote.'.format(self.args.name)\n Logger.info(msg)\n ret = self.set(Config.SECTION_CORE,\n Config.SECTION_CORE_REMOTE,\n self.args.name)\n\n return ret\n\n\nclass CmdRemoteRemove(CmdConfig):\n def run(self):\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n return self.unset(section, Config.SECTION_REMOTE_URL)\n\n\nclass CmdRemoteModify(CmdConfig):\n def run(self):\n section = Config.SECTION_REMOTE_FMT.format(self.args.name)\n self.args.name = '{}.{}'.format(section, self.args.option)\n return super(CmdRemoteModify, self).run()\n\n\nclass CmdRemoteList(CmdConfig):\n def run(self):\n for section in self.configobj.keys():\n r = re.match(Config.SECTION_REMOTE_REGEX, section)\n if r:\n name = r.group('name')\n url = self.configobj[section].get(Config.SECTION_REMOTE_URL,\n '')\n Logger.info('{}\\t{}'.format(name, url))\n return 0\n", "path": "dvc/command/remote.py"}]}
1,301
343
gh_patches_debug_12525
rasdani/github-patches
git_diff
ethereum__web3.py-306
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Transform Markdown on PyPi release ### What was wrong? README is not readable on PyPI: https://pypi.python.org/pypi/web3/1.4.0 `setuptools-markdown` allows to publish README.md on PyPi - https://pypi.python.org/pypi/setuptools-markdown #### Cute Animal Picture ![Koala](http://i.imgur.com/Y3FZh.jpg) </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 import os 4 import sys 5 6 from setuptools import ( 7 setup, 8 find_packages, 9 ) 10 11 12 DIR = os.path.dirname(os.path.abspath(__file__)) 13 14 15 readme = open(os.path.join(DIR, 'README.md')).read() 16 17 install_requires = [ 18 "cytoolz>=0.8.2", 19 "ethereum-abi-utils>=0.4.0", 20 "ethereum-utils>=0.4.0", 21 "pylru>=1.0.9", 22 "pysha3>=0.3", 23 "requests>=2.12.4", 24 "rlp>=0.4.7", 25 "toolz>=0.8.2", 26 ] 27 28 if sys.platform == 'win32': 29 install_requires.append('pypiwin32') 30 31 setup( 32 name='web3', 33 version='3.13.5', 34 description="""Web3.py""", 35 long_description=readme, 36 author='Piper Merriam', 37 author_email='[email protected]', 38 url='https://github.com/pipermerriam/web3.py', 39 include_package_data=True, 40 install_requires=install_requires, 41 extras_require={ 42 'tester': ["eth-testrpc>=1.2.0"], 43 'gevent': [ 44 "gevent>=1.1.1,<1.2.0", 45 "geventhttpclient>=1.3.1", 46 ], 47 }, 48 py_modules=['web3'], 49 license="MIT", 50 zip_safe=False, 51 keywords='ethereum', 52 packages=find_packages(exclude=["tests", "tests.*"]), 53 classifiers=[ 54 'Development Status :: 2 - Pre-Alpha', 55 'Intended Audience :: Developers', 56 'License :: OSI Approved :: MIT License', 57 'Natural Language :: English', 58 'Programming Language :: Python :: 2', 59 'Programming Language :: Python :: 2.7', 60 'Programming Language :: Python :: 3', 61 'Programming Language :: Python :: 3.4', 62 'Programming Language :: Python :: 3.5', 63 ], 64 ) 65 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -32,12 +32,13 @@ name='web3', version='3.13.5', description="""Web3.py""", - long_description=readme, + long_description_markdown_filename='README.md', author='Piper Merriam', author_email='[email protected]', url='https://github.com/pipermerriam/web3.py', include_package_data=True, install_requires=install_requires, + setup_requires=['setuptools-markdown'], extras_require={ 'tester': ["eth-testrpc>=1.2.0"], 'gevent': [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,12 +32,13 @@\n name='web3',\n version='3.13.5',\n description=\"\"\"Web3.py\"\"\",\n- long_description=readme,\n+ long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/pipermerriam/web3.py',\n include_package_data=True,\n install_requires=install_requires,\n+ setup_requires=['setuptools-markdown'],\n extras_require={\n 'tester': [\"eth-testrpc>=1.2.0\"],\n 'gevent': [\n", "issue": "Transform Markdown on PyPi release\n### What was wrong?\n\nREADME is not readable on PyPI: https://pypi.python.org/pypi/web3/1.4.0\n\n`setuptools-markdown` allows to publish README.md on PyPi\n- https://pypi.python.org/pypi/setuptools-markdown\n#### Cute Animal Picture\n\n![Koala](http://i.imgur.com/Y3FZh.jpg)\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nfrom setuptools import (\n setup,\n find_packages,\n)\n\n\nDIR = os.path.dirname(os.path.abspath(__file__))\n\n\nreadme = open(os.path.join(DIR, 'README.md')).read()\n\ninstall_requires = [\n \"cytoolz>=0.8.2\",\n \"ethereum-abi-utils>=0.4.0\",\n \"ethereum-utils>=0.4.0\",\n \"pylru>=1.0.9\",\n \"pysha3>=0.3\",\n \"requests>=2.12.4\",\n \"rlp>=0.4.7\",\n \"toolz>=0.8.2\",\n]\n\nif sys.platform == 'win32':\n install_requires.append('pypiwin32')\n\nsetup(\n name='web3',\n version='3.13.5',\n description=\"\"\"Web3.py\"\"\",\n long_description=readme,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/pipermerriam/web3.py',\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'tester': [\"eth-testrpc>=1.2.0\"],\n 'gevent': [\n \"gevent>=1.1.1,<1.2.0\",\n \"geventhttpclient>=1.3.1\",\n ],\n },\n py_modules=['web3'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n)\n", "path": "setup.py"}]}
1,194
161
gh_patches_debug_6493
rasdani/github-patches
git_diff
pwr-Solaar__Solaar-2305
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Extend Makefile with installation and test targets **Information** - Solaar version: 1.1.11rc4 **Is your feature request related to a problem? Please describe.** The Solaar installation with all its dependencies (pip, apt, udev etc.) is cumbersome. Although some setup steps for GItHub CI exists, they are not usable for local setup of Solaar. **Describe the solution you'd like** Move the setup commands into a Makefile and use the targets in GitHub workflow files. Thus, the commands are in a single place and also usable for local setups. **Additional context** This extends #2263 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 import subprocess 3 4 from glob import glob as _glob 5 6 try: 7 from setuptools import setup 8 except ImportError: 9 from distutils.core import setup 10 11 NAME = 'Solaar' 12 13 with open('lib/solaar/version', 'r') as vfile: 14 version = vfile.read().strip() 15 16 try: # get commit from git describe 17 commit = subprocess.check_output(['git', 'describe', '--always'], stderr=subprocess.DEVNULL).strip().decode() 18 with open('lib/solaar/commit', 'w') as vfile: 19 vfile.write(f'{commit}\n') 20 except Exception: # get commit from Ubuntu dpkg-parsechangelog 21 try: 22 commit = subprocess.check_output(['dpkg-parsechangelog', '--show-field', 'Version'], 23 stderr=subprocess.DEVNULL).strip().decode() 24 commit = commit.split('~') 25 with open('lib/solaar/commit', 'w') as vfile: 26 vfile.write(f'{commit[0]}\n') 27 except Exception as e: 28 print('Exception using dpkg-parsechangelog', e) 29 30 31 def _data_files(): 32 from os.path import dirname as _dirname 33 34 yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/solaar*.svg') 35 yield 'share/icons/hicolor/32x32/apps', _glob('share/solaar/icons/solaar-light_*.png') 36 37 for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'): 38 yield _dirname(mo), [mo] 39 40 yield 'share/applications', ['share/applications/solaar.desktop'] 41 yield 'lib/udev/rules.d', ['rules.d/42-logitech-unify-permissions.rules'] 42 yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml'] 43 44 del _dirname 45 46 47 setup( 48 name=NAME.lower(), 49 version=version, 50 description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.', 51 long_description=''' 52 Solaar is a Linux device manager for many Logitech peripherals that connect through 53 Unifying and other receivers or via USB or Bluetooth. 54 Solaar is able to pair/unpair devices with receivers and show and modify some of the 55 modifiable features of devices. 56 For instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(), 57 author='Daniel Pavel', 58 license='GPLv2', 59 url='http://pwr-solaar.github.io/Solaar/', 60 classifiers=[ 61 'Development Status :: 4 - Beta', 62 'Environment :: X11 Applications :: GTK', 63 'Environment :: Console', 64 'Intended Audience :: End Users/Desktop', 65 'License :: DFSG approved', 66 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 67 'Natural Language :: English', 68 'Programming Language :: Python :: 3 :: Only', 69 'Operating System :: POSIX :: Linux', 70 'Topic :: Utilities', 71 ], 72 platforms=['linux'], 73 74 # sudo apt install python-gi python3-gi \ 75 # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1 76 # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'], 77 python_requires='>=3.7', 78 install_requires=[ 79 'evdev (>= 1.1.2) ; platform_system=="Linux"', 80 'pyudev (>= 0.13)', 81 'PyYAML (>= 3.12)', 82 'python-xlib (>= 0.27)', 83 'psutil (>= 5.4.3)', 84 'dbus-python ; platform_system=="Linux"', 85 ], 86 extras_require={ 87 'report-descriptor': ['hid-parser'], 88 'desktop-notifications': ['Notify (>= 0.7)'], 89 'git-commit': ['python-git-info'], 90 'test': ['pytest'], 91 }, 92 package_dir={'': 'lib'}, 93 packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'], 94 data_files=list(_data_files()), 95 include_package_data=True, 96 scripts=_glob('bin/*'), 97 ) 98 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -87,7 +87,7 @@ 'report-descriptor': ['hid-parser'], 'desktop-notifications': ['Notify (>= 0.7)'], 'git-commit': ['python-git-info'], - 'test': ['pytest'], + 'test': ['pytest', 'pytest-cov'], }, package_dir={'': 'lib'}, packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -87,7 +87,7 @@\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n 'git-commit': ['python-git-info'],\n- 'test': ['pytest'],\n+ 'test': ['pytest', 'pytest-cov'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n", "issue": "Extend Makefile with installation and test targets\n**Information**\r\n- Solaar version: 1.1.11rc4\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nThe Solaar installation with all its dependencies (pip, apt, udev etc.) is cumbersome. Although some setup steps for GItHub CI exists, they are not usable for local setup of Solaar. \r\n\r\n**Describe the solution you'd like**\r\nMove the setup commands into a Makefile and use the targets in GitHub workflow files. Thus, the commands are in a single place and also usable for local setups.\r\n\r\n**Additional context**\r\nThis extends #2263 \r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport subprocess\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nNAME = 'Solaar'\n\nwith open('lib/solaar/version', 'r') as vfile:\n version = vfile.read().strip()\n\ntry: # get commit from git describe\n commit = subprocess.check_output(['git', 'describe', '--always'], stderr=subprocess.DEVNULL).strip().decode()\n with open('lib/solaar/commit', 'w') as vfile:\n vfile.write(f'{commit}\\n')\nexcept Exception: # get commit from Ubuntu dpkg-parsechangelog\n try:\n commit = subprocess.check_output(['dpkg-parsechangelog', '--show-field', 'Version'],\n stderr=subprocess.DEVNULL).strip().decode()\n commit = commit.split('~')\n with open('lib/solaar/commit', 'w') as vfile:\n vfile.write(f'{commit[0]}\\n')\n except Exception as e:\n print('Exception using dpkg-parsechangelog', e)\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/icons/hicolor/32x32/apps', _glob('share/solaar/icons/solaar-light_*.png')\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'lib/udev/rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=version,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2) ; platform_system==\"Linux\"',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n 'dbus-python ; platform_system==\"Linux\"',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n 'git-commit': ['python-git-info'],\n 'test': ['pytest'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n include_package_data=True,\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}]}
1,821
131
gh_patches_debug_21755
rasdani/github-patches
git_diff
deepchecks__deepchecks-613
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DOCS] Simple Model Comparison example improvement Currently, the Simple Model Comparison notebook lacks some explanations. Please follow the guidelines from this issue: #543 to improve it </issue> <code> [start of deepchecks/utils/validation.py] 1 # ---------------------------------------------------------------------------- 2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com) 3 # 4 # This file is part of Deepchecks. 5 # Deepchecks is distributed under the terms of the GNU Affero General 6 # Public License (version 3 or later). 7 # You should have received a copy of the GNU Affero General Public License 8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>. 9 # ---------------------------------------------------------------------------- 10 # 11 """objects validation utilities.""" 12 import typing as t 13 14 import pandas as pd 15 16 from deepchecks import base # pylint: disable=unused-import, is used in type annotations 17 from deepchecks import errors 18 from deepchecks.utils.typing import Hashable, BasicModel 19 20 __all__ = ['model_type_validation', 'ensure_hashable_or_mutable_sequence', 'validate_model', 'ensure_dataframe_type'] 21 22 23 def model_type_validation(model: t.Any): 24 """Receive any object and check if it's an instance of a model we support. 25 26 Raises: 27 DeepchecksValueError: If the object is not of a supported type 28 """ 29 if not isinstance(model, BasicModel): 30 raise errors.DeepchecksValueError( 31 'Model must inherit from one of supported ' 32 'models: sklearn.base.BaseEstimator or CatBoost, ' 33 f'Received: {model.__class__.__name__}' 34 ) 35 36 37 def validate_model( 38 data: t.Union['base.Dataset', pd.DataFrame], 39 model: t.Any 40 ): 41 """Check model is able to predict on the dataset. 42 43 Args: 44 data (Dataset, pandas.DataFrame): 45 model (BaseEstimator): 46 47 Raise: 48 DeepchecksValueError: if dataset does not match model 49 """ 50 model_type_validation(model) 51 52 error_message = ( 53 'In order to evaluate model correctness we need not empty dataset ' 54 'with the same set of features that was used to fit the model. {0}' 55 ) 56 57 if isinstance(data, base.Dataset): 58 features = data.features_columns 59 features_names = set(data.features) 60 else: 61 features = data 62 features_names = set(data.columns) 63 64 model_features = getattr(model, 'feature_names_in_', None) 65 66 if features is None: 67 raise errors.DeepchecksValueError(error_message.format( 68 'But function received dataset without feature columns.' 69 )) 70 71 if len(features) == 0: 72 raise errors.DeepchecksValueError(error_message.format( 73 'But function received empty dataset.' 74 )) 75 76 try: 77 model_features = set(model_features) # type: ignore 78 if model_features != features_names: 79 raise errors.DeepchecksValueError(error_message.format( 80 'But function received dataset with a different set of features.' 81 )) 82 except (TypeError, ValueError): 83 # in case if 'model.feature_names_in_' was None or not iterable 84 pass 85 86 try: 87 model.predict(features.head(1)) 88 except Exception as exc: 89 raise errors.DeepchecksValueError( 90 f'Got error when trying to predict with model on dataset: {str(exc)}' 91 ) 92 93 94 T = t.TypeVar('T', bound=Hashable) 95 96 97 def ensure_hashable_or_mutable_sequence( 98 value: t.Union[T, t.MutableSequence[T]], 99 message: str = ( 100 'Provided value is neither hashable nor mutable ' 101 'sequence of hashable items. Got {type}') 102 ) -> t.List[T]: 103 """Validate that provided value is either hashable or mutable sequence of hashable values.""" 104 if isinstance(value, Hashable): 105 return [value] 106 107 if isinstance(value, t.MutableSequence): 108 if len(value) > 0 and not isinstance(value[0], Hashable): 109 raise errors.DeepchecksValueError(message.format( 110 type=f'MutableSequence[{type(value).__name__}]' 111 )) 112 return list(value) 113 114 raise errors.DeepchecksValueError(message.format( 115 type=type(value).__name__ 116 )) 117 118 119 def ensure_dataframe_type(obj: t.Any) -> pd.DataFrame: 120 """Ensure that given object is of type DataFrame or Dataset and return it as DataFrame. else raise error. 121 122 Args: 123 obj: Object to ensure it is DataFrame or Dataset 124 125 Returns: 126 (pd.DataFrame) 127 """ 128 if isinstance(obj, pd.DataFrame): 129 return obj 130 elif isinstance(obj, base.Dataset): 131 return obj.data 132 else: 133 raise errors.DeepchecksValueError( 134 f'dataset must be of type DataFrame or Dataset, but got: {type(obj).__name__}' 135 ) 136 [end of deepchecks/utils/validation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/deepchecks/utils/validation.py b/deepchecks/utils/validation.py --- a/deepchecks/utils/validation.py +++ b/deepchecks/utils/validation.py @@ -56,12 +56,8 @@ if isinstance(data, base.Dataset): features = data.features_columns - features_names = set(data.features) else: features = data - features_names = set(data.columns) - - model_features = getattr(model, 'feature_names_in_', None) if features is None: raise errors.DeepchecksValueError(error_message.format( @@ -73,16 +69,6 @@ 'But function received empty dataset.' )) - try: - model_features = set(model_features) # type: ignore - if model_features != features_names: - raise errors.DeepchecksValueError(error_message.format( - 'But function received dataset with a different set of features.' - )) - except (TypeError, ValueError): - # in case if 'model.feature_names_in_' was None or not iterable - pass - try: model.predict(features.head(1)) except Exception as exc:
{"golden_diff": "diff --git a/deepchecks/utils/validation.py b/deepchecks/utils/validation.py\n--- a/deepchecks/utils/validation.py\n+++ b/deepchecks/utils/validation.py\n@@ -56,12 +56,8 @@\n \n if isinstance(data, base.Dataset):\n features = data.features_columns\n- features_names = set(data.features)\n else:\n features = data\n- features_names = set(data.columns)\n-\n- model_features = getattr(model, 'feature_names_in_', None)\n \n if features is None:\n raise errors.DeepchecksValueError(error_message.format(\n@@ -73,16 +69,6 @@\n 'But function received empty dataset.'\n ))\n \n- try:\n- model_features = set(model_features) # type: ignore\n- if model_features != features_names:\n- raise errors.DeepchecksValueError(error_message.format(\n- 'But function received dataset with a different set of features.'\n- ))\n- except (TypeError, ValueError):\n- # in case if 'model.feature_names_in_' was None or not iterable\n- pass\n-\n try:\n model.predict(features.head(1))\n except Exception as exc:\n", "issue": "[DOCS] Simple Model Comparison example improvement \nCurrently, the Simple Model Comparison notebook lacks some explanations.\r\nPlease follow the guidelines from this issue: #543 to improve it\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"objects validation utilities.\"\"\"\nimport typing as t\n\nimport pandas as pd\n\nfrom deepchecks import base # pylint: disable=unused-import, is used in type annotations\nfrom deepchecks import errors\nfrom deepchecks.utils.typing import Hashable, BasicModel\n\n__all__ = ['model_type_validation', 'ensure_hashable_or_mutable_sequence', 'validate_model', 'ensure_dataframe_type']\n\n\ndef model_type_validation(model: t.Any):\n \"\"\"Receive any object and check if it's an instance of a model we support.\n\n Raises:\n DeepchecksValueError: If the object is not of a supported type\n \"\"\"\n if not isinstance(model, BasicModel):\n raise errors.DeepchecksValueError(\n 'Model must inherit from one of supported '\n 'models: sklearn.base.BaseEstimator or CatBoost, '\n f'Received: {model.__class__.__name__}'\n )\n\n\ndef validate_model(\n data: t.Union['base.Dataset', pd.DataFrame],\n model: t.Any\n):\n \"\"\"Check model is able to predict on the dataset.\n\n Args:\n data (Dataset, pandas.DataFrame):\n model (BaseEstimator):\n\n Raise:\n DeepchecksValueError: if dataset does not match model\n \"\"\"\n model_type_validation(model)\n\n error_message = (\n 'In order to evaluate model correctness we need not empty dataset '\n 'with the same set of features that was used to fit the model. {0}'\n )\n\n if isinstance(data, base.Dataset):\n features = data.features_columns\n features_names = set(data.features)\n else:\n features = data\n features_names = set(data.columns)\n\n model_features = getattr(model, 'feature_names_in_', None)\n\n if features is None:\n raise errors.DeepchecksValueError(error_message.format(\n 'But function received dataset without feature columns.'\n ))\n\n if len(features) == 0:\n raise errors.DeepchecksValueError(error_message.format(\n 'But function received empty dataset.'\n ))\n\n try:\n model_features = set(model_features) # type: ignore\n if model_features != features_names:\n raise errors.DeepchecksValueError(error_message.format(\n 'But function received dataset with a different set of features.'\n ))\n except (TypeError, ValueError):\n # in case if 'model.feature_names_in_' was None or not iterable\n pass\n\n try:\n model.predict(features.head(1))\n except Exception as exc:\n raise errors.DeepchecksValueError(\n f'Got error when trying to predict with model on dataset: {str(exc)}'\n )\n\n\nT = t.TypeVar('T', bound=Hashable)\n\n\ndef ensure_hashable_or_mutable_sequence(\n value: t.Union[T, t.MutableSequence[T]],\n message: str = (\n 'Provided value is neither hashable nor mutable '\n 'sequence of hashable items. Got {type}')\n) -> t.List[T]:\n \"\"\"Validate that provided value is either hashable or mutable sequence of hashable values.\"\"\"\n if isinstance(value, Hashable):\n return [value]\n\n if isinstance(value, t.MutableSequence):\n if len(value) > 0 and not isinstance(value[0], Hashable):\n raise errors.DeepchecksValueError(message.format(\n type=f'MutableSequence[{type(value).__name__}]'\n ))\n return list(value)\n\n raise errors.DeepchecksValueError(message.format(\n type=type(value).__name__\n ))\n\n\ndef ensure_dataframe_type(obj: t.Any) -> pd.DataFrame:\n \"\"\"Ensure that given object is of type DataFrame or Dataset and return it as DataFrame. else raise error.\n\n Args:\n obj: Object to ensure it is DataFrame or Dataset\n\n Returns:\n (pd.DataFrame)\n \"\"\"\n if isinstance(obj, pd.DataFrame):\n return obj\n elif isinstance(obj, base.Dataset):\n return obj.data\n else:\n raise errors.DeepchecksValueError(\n f'dataset must be of type DataFrame or Dataset, but got: {type(obj).__name__}'\n )\n", "path": "deepchecks/utils/validation.py"}]}
1,832
255
gh_patches_debug_15150
rasdani/github-patches
git_diff
huggingface__dataset-viewer-410
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error with RGBA images https://huggingface.co/datasets/huggan/few-shot-skulls ``` Status code: 500 Exception: Status500Error Message: cannot write mode RGBA as JPEG ``` reported by @NielsRogge </issue> <code> [start of services/worker/src/worker/models/column/image.py] 1 from typing import Any, List 2 3 from datasets import Image 4 from PIL import Image as PILImage # type: ignore 5 6 from worker.models.asset import create_image_file 7 from worker.models.column.default import ( 8 Cell, 9 CellTypeError, 10 ColumnInferenceError, 11 ColumnTypeError, 12 CommonColumn, 13 ) 14 15 16 def check_value(value: Any) -> None: 17 if value is None: 18 return 19 if not isinstance(value, PILImage.Image): 20 raise CellTypeError("image cell must be a PIL image") 21 22 23 def infer_from_values(values: List[Any]) -> None: 24 for value in values: 25 check_value(value) 26 if values and all(value is None for value in values): 27 raise ColumnInferenceError("all the values are None, cannot infer column type") 28 29 30 class ImageColumn(CommonColumn): 31 def __init__(self, name: str, feature: Any, values: List[Any]): 32 if feature: 33 if not isinstance(feature, Image): 34 raise ColumnTypeError("feature type mismatch") 35 else: 36 infer_from_values(values) 37 self.name = name 38 self.type = "RELATIVE_IMAGE_URL" 39 40 def get_cell_value(self, dataset_name: str, config_name: str, split_name: str, row_idx: int, value: Any) -> Cell: 41 if value is None: 42 return None 43 check_value(value) 44 # this function can raise, we don't catch it 45 return create_image_file(dataset_name, config_name, split_name, row_idx, self.name, "image.jpg", value) 46 [end of services/worker/src/worker/models/column/image.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/services/worker/src/worker/models/column/image.py b/services/worker/src/worker/models/column/image.py --- a/services/worker/src/worker/models/column/image.py +++ b/services/worker/src/worker/models/column/image.py @@ -41,5 +41,15 @@ if value is None: return None check_value(value) - # this function can raise, we don't catch it - return create_image_file(dataset_name, config_name, split_name, row_idx, self.name, "image.jpg", value) + # attempt to generate one of the supported formats; if unsuccessful, throw an error + for ext in [".jpg", ".png"]: + try: + return create_image_file( + dataset_name, config_name, split_name, row_idx, self.name, f"image{ext}", value + ) + except OSError: + # if wrong format, try the next one, see https://github.com/huggingface/datasets-server/issues/191 + # OSError: cannot write mode P as JPEG + # OSError: cannot write mode RGBA as JPEG + continue + raise ValueError("Image cannot be written as JPEG or PNG")
{"golden_diff": "diff --git a/services/worker/src/worker/models/column/image.py b/services/worker/src/worker/models/column/image.py\n--- a/services/worker/src/worker/models/column/image.py\n+++ b/services/worker/src/worker/models/column/image.py\n@@ -41,5 +41,15 @@\n if value is None:\n return None\n check_value(value)\n- # this function can raise, we don't catch it\n- return create_image_file(dataset_name, config_name, split_name, row_idx, self.name, \"image.jpg\", value)\n+ # attempt to generate one of the supported formats; if unsuccessful, throw an error\n+ for ext in [\".jpg\", \".png\"]:\n+ try:\n+ return create_image_file(\n+ dataset_name, config_name, split_name, row_idx, self.name, f\"image{ext}\", value\n+ )\n+ except OSError:\n+ # if wrong format, try the next one, see https://github.com/huggingface/datasets-server/issues/191\n+ # OSError: cannot write mode P as JPEG\n+ # OSError: cannot write mode RGBA as JPEG\n+ continue\n+ raise ValueError(\"Image cannot be written as JPEG or PNG\")\n", "issue": "Error with RGBA images\nhttps://huggingface.co/datasets/huggan/few-shot-skulls\r\n\r\n```\r\nStatus code: 500\r\nException: Status500Error\r\nMessage: cannot write mode RGBA as JPEG\r\n```\r\n\r\nreported by @NielsRogge \r\n\r\n\n", "before_files": [{"content": "from typing import Any, List\n\nfrom datasets import Image\nfrom PIL import Image as PILImage # type: ignore\n\nfrom worker.models.asset import create_image_file\nfrom worker.models.column.default import (\n Cell,\n CellTypeError,\n ColumnInferenceError,\n ColumnTypeError,\n CommonColumn,\n)\n\n\ndef check_value(value: Any) -> None:\n if value is None:\n return\n if not isinstance(value, PILImage.Image):\n raise CellTypeError(\"image cell must be a PIL image\")\n\n\ndef infer_from_values(values: List[Any]) -> None:\n for value in values:\n check_value(value)\n if values and all(value is None for value in values):\n raise ColumnInferenceError(\"all the values are None, cannot infer column type\")\n\n\nclass ImageColumn(CommonColumn):\n def __init__(self, name: str, feature: Any, values: List[Any]):\n if feature:\n if not isinstance(feature, Image):\n raise ColumnTypeError(\"feature type mismatch\")\n else:\n infer_from_values(values)\n self.name = name\n self.type = \"RELATIVE_IMAGE_URL\"\n\n def get_cell_value(self, dataset_name: str, config_name: str, split_name: str, row_idx: int, value: Any) -> Cell:\n if value is None:\n return None\n check_value(value)\n # this function can raise, we don't catch it\n return create_image_file(dataset_name, config_name, split_name, row_idx, self.name, \"image.jpg\", value)\n", "path": "services/worker/src/worker/models/column/image.py"}]}
1,022
270
gh_patches_debug_32029
rasdani/github-patches
git_diff
PrefectHQ__prefect-3465
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set flow labels through the CLI register command ## Current behavior Right now there is no way to add a label to a **flow** when registering it using the CLI. You only can set the label from inside the flow's code. ## Proposed behavior It would be really nice to have a `--label` parameter (just like the one we have for agents) that would allow us to add labels on the fly when registering a flow from the CLI. ## Example Adding the parameter would look something like this: `prefect register flow --file my_flow.py --name My-Flow --label my-label` or this: `prefect register flow --file my_flow.py --name My-Flow -l my-label` This could be very useful for registering flows with continuous integration depending on the environment you want to build the flow for (for example, `dev` or `prod`). Thanks a lot and keep it up! </issue> <code> [start of src/prefect/cli/register.py] 1 import os 2 3 import click 4 5 import prefect 6 from prefect.utilities.storage import extract_flow_from_file 7 8 9 @click.group(hidden=True) 10 def register(): 11 """ 12 Register flows 13 14 \b 15 Usage: 16 $ prefect register [OBJECT] 17 18 \b 19 Arguments: 20 flow Register flows with a backend API 21 22 \b 23 Examples: 24 $ prefect register flow --file my_flow.py --name My-Flow 25 """ 26 27 28 @register.command( 29 hidden=True, 30 context_settings=dict(ignore_unknown_options=True, allow_extra_args=True), 31 ) 32 @click.option( 33 "--file", 34 "-f", 35 required=True, 36 help="A file that contains a flow", 37 hidden=True, 38 default=None, 39 type=click.Path(exists=True), 40 ) 41 @click.option( 42 "--name", 43 "-n", 44 required=False, 45 help="The `flow.name` to pull out of the file provided.", 46 hidden=True, 47 default=None, 48 ) 49 @click.option( 50 "--project", 51 "-p", 52 required=False, 53 help="The name of a Prefect project to register this flow.", 54 hidden=True, 55 default=None, 56 ) 57 def flow(file, name, project): 58 """ 59 Register a flow from a file. This call will pull a Flow object out of a `.py` file 60 and call `flow.register` on it. 61 62 \b 63 Options: 64 --file, -f TEXT The path to a local file which contains a flow [required] 65 --name, -n TEXT The `flow.name` to pull out of the file provided. If a name 66 is not provided then the first flow object found will be registered. 67 --project TEXT The name of a Prefect project to register this flow 68 69 \b 70 Examples: 71 $ prefect register flow --file my_flow.py --name My-Flow 72 """ 73 74 # Don't run extra `run` and `register` functions inside file 75 with prefect.context({"loading_flow": True}): 76 file_path = os.path.abspath(file) 77 flow_obj = extract_flow_from_file(file_path=file_path, flow_name=name) 78 79 flow_obj.register(project_name=project) 80 [end of src/prefect/cli/register.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/prefect/cli/register.py b/src/prefect/cli/register.py --- a/src/prefect/cli/register.py +++ b/src/prefect/cli/register.py @@ -54,7 +54,14 @@ hidden=True, default=None, ) -def flow(file, name, project): [email protected]( + "--label", + "-l", + required=False, + hidden=True, + multiple=True, +) +def flow(file, name, project, label): """ Register a flow from a file. This call will pull a Flow object out of a `.py` file and call `flow.register` on it. @@ -64,16 +71,23 @@ --file, -f TEXT The path to a local file which contains a flow [required] --name, -n TEXT The `flow.name` to pull out of the file provided. If a name is not provided then the first flow object found will be registered. - --project TEXT The name of a Prefect project to register this flow + --project, -p TEXT The name of a Prefect project to register this flow + --label, -l TEXT A label to set on the flow, extending any existing labels. + Multiple labels are supported, eg. `-l label1 -l label2`. \b Examples: - $ prefect register flow --file my_flow.py --name My-Flow + $ prefect register flow --file my_flow.py --name My-Flow -l label1 -l label2 """ # Don't run extra `run` and `register` functions inside file with prefect.context({"loading_flow": True}): file_path = os.path.abspath(file) - flow_obj = extract_flow_from_file(file_path=file_path, flow_name=name) + flow = extract_flow_from_file(file_path=file_path, flow_name=name) + + if getattr(flow, "run_config", None) is not None: + flow.run_config.labels.update(label) + else: + flow.environment.labels.update(label) - flow_obj.register(project_name=project) + flow.register(project_name=project)
{"golden_diff": "diff --git a/src/prefect/cli/register.py b/src/prefect/cli/register.py\n--- a/src/prefect/cli/register.py\n+++ b/src/prefect/cli/register.py\n@@ -54,7 +54,14 @@\n hidden=True,\n default=None,\n )\n-def flow(file, name, project):\[email protected](\n+ \"--label\",\n+ \"-l\",\n+ required=False,\n+ hidden=True,\n+ multiple=True,\n+)\n+def flow(file, name, project, label):\n \"\"\"\n Register a flow from a file. This call will pull a Flow object out of a `.py` file\n and call `flow.register` on it.\n@@ -64,16 +71,23 @@\n --file, -f TEXT The path to a local file which contains a flow [required]\n --name, -n TEXT The `flow.name` to pull out of the file provided. If a name\n is not provided then the first flow object found will be registered.\n- --project TEXT The name of a Prefect project to register this flow\n+ --project, -p TEXT The name of a Prefect project to register this flow\n+ --label, -l TEXT A label to set on the flow, extending any existing labels.\n+ Multiple labels are supported, eg. `-l label1 -l label2`.\n \n \\b\n Examples:\n- $ prefect register flow --file my_flow.py --name My-Flow\n+ $ prefect register flow --file my_flow.py --name My-Flow -l label1 -l label2\n \"\"\"\n \n # Don't run extra `run` and `register` functions inside file\n with prefect.context({\"loading_flow\": True}):\n file_path = os.path.abspath(file)\n- flow_obj = extract_flow_from_file(file_path=file_path, flow_name=name)\n+ flow = extract_flow_from_file(file_path=file_path, flow_name=name)\n+\n+ if getattr(flow, \"run_config\", None) is not None:\n+ flow.run_config.labels.update(label)\n+ else:\n+ flow.environment.labels.update(label)\n \n- flow_obj.register(project_name=project)\n+ flow.register(project_name=project)\n", "issue": "Set flow labels through the CLI register command\n## Current behavior\r\nRight now there is no way to add a label to a **flow** when registering it using the CLI. You only can set the label from inside the flow's code.\r\n\r\n## Proposed behavior\r\nIt would be really nice to have a `--label` parameter (just like the one we have for agents) that would allow us to add labels on the fly when registering a flow from the CLI.\r\n\r\n## Example\r\nAdding the parameter would look something like this:\r\n`prefect register flow --file my_flow.py --name My-Flow --label my-label`\r\nor this:\r\n`prefect register flow --file my_flow.py --name My-Flow -l my-label`\r\n\r\nThis could be very useful for registering flows with continuous integration depending on the environment you want to build the flow for (for example, `dev` or `prod`).\r\n\r\nThanks a lot and keep it up!\r\n\n", "before_files": [{"content": "import os\n\nimport click\n\nimport prefect\nfrom prefect.utilities.storage import extract_flow_from_file\n\n\[email protected](hidden=True)\ndef register():\n \"\"\"\n Register flows\n\n \\b\n Usage:\n $ prefect register [OBJECT]\n\n \\b\n Arguments:\n flow Register flows with a backend API\n\n \\b\n Examples:\n $ prefect register flow --file my_flow.py --name My-Flow\n \"\"\"\n\n\[email protected](\n hidden=True,\n context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),\n)\[email protected](\n \"--file\",\n \"-f\",\n required=True,\n help=\"A file that contains a flow\",\n hidden=True,\n default=None,\n type=click.Path(exists=True),\n)\[email protected](\n \"--name\",\n \"-n\",\n required=False,\n help=\"The `flow.name` to pull out of the file provided.\",\n hidden=True,\n default=None,\n)\[email protected](\n \"--project\",\n \"-p\",\n required=False,\n help=\"The name of a Prefect project to register this flow.\",\n hidden=True,\n default=None,\n)\ndef flow(file, name, project):\n \"\"\"\n Register a flow from a file. This call will pull a Flow object out of a `.py` file\n and call `flow.register` on it.\n\n \\b\n Options:\n --file, -f TEXT The path to a local file which contains a flow [required]\n --name, -n TEXT The `flow.name` to pull out of the file provided. If a name\n is not provided then the first flow object found will be registered.\n --project TEXT The name of a Prefect project to register this flow\n\n \\b\n Examples:\n $ prefect register flow --file my_flow.py --name My-Flow\n \"\"\"\n\n # Don't run extra `run` and `register` functions inside file\n with prefect.context({\"loading_flow\": True}):\n file_path = os.path.abspath(file)\n flow_obj = extract_flow_from_file(file_path=file_path, flow_name=name)\n\n flow_obj.register(project_name=project)\n", "path": "src/prefect/cli/register.py"}]}
1,354
491
gh_patches_debug_17801
rasdani/github-patches
git_diff
apache__airflow-9759
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Replace flask_oauthlib with Authlib **Description** flask_oauthlib has been deprecated in favour of Authlib. It would be good if airflow starts using Authlib **Use case / motivation** FlaskAppBuilder is now using Authlib. Since FlaskAppBuilder is deeply integrated into Airflow, it will be good to also have this Authlib. Flask-oauthlib documentation recommends Authlib **Related Issues** </issue> <code> [start of airflow/config_templates/default_webserver_config.py] 1 # 2 # Licensed to the Apache Software Foundation (ASF) under one 3 # or more contributor license agreements. See the NOTICE file 4 # distributed with this work for additional information 5 # regarding copyright ownership. The ASF licenses this file 6 # to you under the Apache License, Version 2.0 (the 7 # "License"); you may not use this file except in compliance 8 # with the License. You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, 13 # software distributed under the License is distributed on an 14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 # KIND, either express or implied. See the License for the 16 # specific language governing permissions and limitations 17 # under the License. 18 """Default configuration for the Airflow webserver""" 19 import os 20 21 from flask_appbuilder.security.manager import AUTH_DB 22 23 from airflow.configuration import conf 24 25 # from flask_appbuilder.security.manager import AUTH_LDAP 26 # from flask_appbuilder.security.manager import AUTH_OAUTH 27 # from flask_appbuilder.security.manager import AUTH_OID 28 # from flask_appbuilder.security.manager import AUTH_REMOTE_USER 29 30 31 basedir = os.path.abspath(os.path.dirname(__file__)) 32 33 # The SQLAlchemy connection string. 34 SQLALCHEMY_DATABASE_URI = conf.get('core', 'SQL_ALCHEMY_CONN') 35 36 # Flask-WTF flag for CSRF 37 WTF_CSRF_ENABLED = True 38 39 # ---------------------------------------------------- 40 # AUTHENTICATION CONFIG 41 # ---------------------------------------------------- 42 # For details on how to set up each of the following authentication, see 43 # http://flask-appbuilder.readthedocs.io/en/latest/security.html# authentication-methods 44 # for details. 45 46 # The authentication type 47 # AUTH_OID : Is for OpenID 48 # AUTH_DB : Is for database 49 # AUTH_LDAP : Is for LDAP 50 # AUTH_REMOTE_USER : Is for using REMOTE_USER from web server 51 # AUTH_OAUTH : Is for OAuth 52 AUTH_TYPE = AUTH_DB 53 54 # Uncomment to setup Full admin role name 55 # AUTH_ROLE_ADMIN = 'Admin' 56 57 # Uncomment to setup Public role name, no authentication needed 58 # AUTH_ROLE_PUBLIC = 'Public' 59 60 # Will allow user self registration 61 # AUTH_USER_REGISTRATION = True 62 63 # The default user self registration role 64 # AUTH_USER_REGISTRATION_ROLE = "Public" 65 66 # When using OAuth Auth, uncomment to setup provider(s) info 67 # Google OAuth example: 68 # OAUTH_PROVIDERS = [{ 69 # 'name':'google', 70 # 'token_key':'access_token', 71 # 'icon':'fa-google', 72 # 'remote_app': { 73 # 'base_url':'https://www.googleapis.com/oauth2/v2/', 74 # 'request_token_params':{ 75 # 'scope': 'email profile' 76 # }, 77 # 'access_token_url':'https://accounts.google.com/o/oauth2/token', 78 # 'authorize_url':'https://accounts.google.com/o/oauth2/auth', 79 # 'request_token_url': None, 80 # 'consumer_key': CONSUMER_KEY, 81 # 'consumer_secret': SECRET_KEY, 82 # } 83 # }] 84 85 # When using LDAP Auth, setup the ldap server 86 # AUTH_LDAP_SERVER = "ldap://ldapserver.new" 87 88 # When using OpenID Auth, uncomment to setup OpenID providers. 89 # example for OpenID authentication 90 # OPENID_PROVIDERS = [ 91 # { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' }, 92 # { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' }, 93 # { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' }, 94 # { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }] 95 96 # ---------------------------------------------------- 97 # Theme CONFIG 98 # ---------------------------------------------------- 99 # Flask App Builder comes up with a number of predefined themes 100 # that you can use for Apache Airflow. 101 # http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes 102 # Please make sure to remove "navbar_color" configuration from airflow.cfg 103 # in order to fully utilize the theme. (or use that property in conjunction with theme) 104 # APP_THEME = "bootstrap-theme.css" # default bootstrap 105 # APP_THEME = "amelia.css" 106 # APP_THEME = "cerulean.css" 107 # APP_THEME = "cosmo.css" 108 # APP_THEME = "cyborg.css" 109 # APP_THEME = "darkly.css" 110 # APP_THEME = "flatly.css" 111 # APP_THEME = "journal.css" 112 # APP_THEME = "lumen.css" 113 # APP_THEME = "paper.css" 114 # APP_THEME = "readable.css" 115 # APP_THEME = "sandstone.css" 116 # APP_THEME = "simplex.css" 117 # APP_THEME = "slate.css" 118 # APP_THEME = "solar.css" 119 # APP_THEME = "spacelab.css" 120 # APP_THEME = "superhero.css" 121 # APP_THEME = "united.css" 122 # APP_THEME = "yeti.css" 123 [end of airflow/config_templates/default_webserver_config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/config_templates/default_webserver_config.py b/airflow/config_templates/default_webserver_config.py --- a/airflow/config_templates/default_webserver_config.py +++ b/airflow/config_templates/default_webserver_config.py @@ -70,15 +70,15 @@ # 'token_key':'access_token', # 'icon':'fa-google', # 'remote_app': { -# 'base_url':'https://www.googleapis.com/oauth2/v2/', -# 'request_token_params':{ +# 'api_base_url':'https://www.googleapis.com/oauth2/v2/', +# 'client_kwargs':{ # 'scope': 'email profile' # }, # 'access_token_url':'https://accounts.google.com/o/oauth2/token', # 'authorize_url':'https://accounts.google.com/o/oauth2/auth', # 'request_token_url': None, -# 'consumer_key': CONSUMER_KEY, -# 'consumer_secret': SECRET_KEY, +# 'client_id': GOOGLE_KEY, +# 'client_secret': GOOGLE_SECRET_KEY, # } # }]
{"golden_diff": "diff --git a/airflow/config_templates/default_webserver_config.py b/airflow/config_templates/default_webserver_config.py\n--- a/airflow/config_templates/default_webserver_config.py\n+++ b/airflow/config_templates/default_webserver_config.py\n@@ -70,15 +70,15 @@\n # 'token_key':'access_token',\n # 'icon':'fa-google',\n # 'remote_app': {\n-# 'base_url':'https://www.googleapis.com/oauth2/v2/',\n-# 'request_token_params':{\n+# 'api_base_url':'https://www.googleapis.com/oauth2/v2/',\n+# 'client_kwargs':{\n # 'scope': 'email profile'\n # },\n # 'access_token_url':'https://accounts.google.com/o/oauth2/token',\n # 'authorize_url':'https://accounts.google.com/o/oauth2/auth',\n # 'request_token_url': None,\n-# 'consumer_key': CONSUMER_KEY,\n-# 'consumer_secret': SECRET_KEY,\n+# 'client_id': GOOGLE_KEY,\n+# 'client_secret': GOOGLE_SECRET_KEY,\n # }\n # }]\n", "issue": "Replace flask_oauthlib with Authlib\n\r\n**Description**\r\n\r\nflask_oauthlib has been deprecated in favour of Authlib. It would be good if airflow starts using Authlib\r\n\r\n**Use case / motivation**\r\n\r\nFlaskAppBuilder is now using Authlib. \r\nSince FlaskAppBuilder is deeply integrated into Airflow, it will be good to also have this Authlib. Flask-oauthlib documentation recommends Authlib\r\n\r\n**Related Issues**\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default configuration for the Airflow webserver\"\"\"\nimport os\n\nfrom flask_appbuilder.security.manager import AUTH_DB\n\nfrom airflow.configuration import conf\n\n# from flask_appbuilder.security.manager import AUTH_LDAP\n# from flask_appbuilder.security.manager import AUTH_OAUTH\n# from flask_appbuilder.security.manager import AUTH_OID\n# from flask_appbuilder.security.manager import AUTH_REMOTE_USER\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# The SQLAlchemy connection string.\nSQLALCHEMY_DATABASE_URI = conf.get('core', 'SQL_ALCHEMY_CONN')\n\n# Flask-WTF flag for CSRF\nWTF_CSRF_ENABLED = True\n\n# ----------------------------------------------------\n# AUTHENTICATION CONFIG\n# ----------------------------------------------------\n# For details on how to set up each of the following authentication, see\n# http://flask-appbuilder.readthedocs.io/en/latest/security.html# authentication-methods\n# for details.\n\n# The authentication type\n# AUTH_OID : Is for OpenID\n# AUTH_DB : Is for database\n# AUTH_LDAP : Is for LDAP\n# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server\n# AUTH_OAUTH : Is for OAuth\nAUTH_TYPE = AUTH_DB\n\n# Uncomment to setup Full admin role name\n# AUTH_ROLE_ADMIN = 'Admin'\n\n# Uncomment to setup Public role name, no authentication needed\n# AUTH_ROLE_PUBLIC = 'Public'\n\n# Will allow user self registration\n# AUTH_USER_REGISTRATION = True\n\n# The default user self registration role\n# AUTH_USER_REGISTRATION_ROLE = \"Public\"\n\n# When using OAuth Auth, uncomment to setup provider(s) info\n# Google OAuth example:\n# OAUTH_PROVIDERS = [{\n# 'name':'google',\n# 'token_key':'access_token',\n# 'icon':'fa-google',\n# 'remote_app': {\n# 'base_url':'https://www.googleapis.com/oauth2/v2/',\n# 'request_token_params':{\n# 'scope': 'email profile'\n# },\n# 'access_token_url':'https://accounts.google.com/o/oauth2/token',\n# 'authorize_url':'https://accounts.google.com/o/oauth2/auth',\n# 'request_token_url': None,\n# 'consumer_key': CONSUMER_KEY,\n# 'consumer_secret': SECRET_KEY,\n# }\n# }]\n\n# When using LDAP Auth, setup the ldap server\n# AUTH_LDAP_SERVER = \"ldap://ldapserver.new\"\n\n# When using OpenID Auth, uncomment to setup OpenID providers.\n# example for OpenID authentication\n# OPENID_PROVIDERS = [\n# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },\n# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },\n# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },\n# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]\n\n# ----------------------------------------------------\n# Theme CONFIG\n# ----------------------------------------------------\n# Flask App Builder comes up with a number of predefined themes\n# that you can use for Apache Airflow.\n# http://flask-appbuilder.readthedocs.io/en/latest/customizing.html#changing-themes\n# Please make sure to remove \"navbar_color\" configuration from airflow.cfg\n# in order to fully utilize the theme. (or use that property in conjunction with theme)\n# APP_THEME = \"bootstrap-theme.css\" # default bootstrap\n# APP_THEME = \"amelia.css\"\n# APP_THEME = \"cerulean.css\"\n# APP_THEME = \"cosmo.css\"\n# APP_THEME = \"cyborg.css\"\n# APP_THEME = \"darkly.css\"\n# APP_THEME = \"flatly.css\"\n# APP_THEME = \"journal.css\"\n# APP_THEME = \"lumen.css\"\n# APP_THEME = \"paper.css\"\n# APP_THEME = \"readable.css\"\n# APP_THEME = \"sandstone.css\"\n# APP_THEME = \"simplex.css\"\n# APP_THEME = \"slate.css\"\n# APP_THEME = \"solar.css\"\n# APP_THEME = \"spacelab.css\"\n# APP_THEME = \"superhero.css\"\n# APP_THEME = \"united.css\"\n# APP_THEME = \"yeti.css\"\n", "path": "airflow/config_templates/default_webserver_config.py"}]}
1,961
240
gh_patches_debug_4985
rasdani/github-patches
git_diff
spack__spack-2022
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `spack checksum` finds wrong URLs I was having some problems getting spack to find the correct URL for files. With these settings ``` python homepage = "http://fishshell.com/" url = "http://fishshell.com/files/2.2.0/fish-2.2.0.tar.gz" list_url = homepage ``` I get the following result (with wrong URLs): ``` sh $ spack checksum fish ==> Found 5 versions of fish. 2.2.0 http://fishshell.com/fish-2.2.0.tar.gz 2.1.2 http://fishshell.com/fish-2.1.2.tar.gz 2.1.1 http://fishshell.com/fish-2.1.1.tar.gz 2.1.0 http://fishshell.com/fish-2.1.0.tar.gz 2.0.0 http://fishshell.com/fish-2.0.0.tar.gz How many would you like to checksum? (default is 5, q to abort) ==> Downloading... ==> Trying to fetch from http://fishshell.com/fish-2.2.0.tar.gz curl: (22) The requested URL returned error: 404 Not Found ==> Fetching from http://fishshell.com/fish-2.2.0.tar.gz failed. ==> Error: All fetchers failed for spack-stage-Slflbn ``` </issue> <code> [start of var/spack/repos/builtin/packages/fish/package.py] 1 ############################################################################## 2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, [email protected], All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/llnl/spack 10 # Please also see the LICENSE file for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 from spack import * 26 27 28 class Fish(Package): 29 """fish is a smart and user-friendly command line shell for OS X, Linux, and 30 the rest of the family. 31 """ 32 33 homepage = "http://fishshell.com/" 34 url = "http://fishshell.com/files/2.2.0/fish-2.2.0.tar.gz" 35 list_url = "http://fishshell.com/files/" 36 list_depth = 2 37 38 version('2.2.0', 'a76339fd14ce2ec229283c53e805faac48c3e99d9e3ede9d82c0554acfc7b77a') 39 40 def install(self, spec, prefix): 41 configure('--prefix=%s' % prefix) 42 43 make() 44 make("install") 45 [end of var/spack/repos/builtin/packages/fish/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/fish/package.py b/var/spack/repos/builtin/packages/fish/package.py --- a/var/spack/repos/builtin/packages/fish/package.py +++ b/var/spack/repos/builtin/packages/fish/package.py @@ -32,8 +32,7 @@ homepage = "http://fishshell.com/" url = "http://fishshell.com/files/2.2.0/fish-2.2.0.tar.gz" - list_url = "http://fishshell.com/files/" - list_depth = 2 + list_url = "http://fishshell.com/" version('2.2.0', 'a76339fd14ce2ec229283c53e805faac48c3e99d9e3ede9d82c0554acfc7b77a')
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/fish/package.py b/var/spack/repos/builtin/packages/fish/package.py\n--- a/var/spack/repos/builtin/packages/fish/package.py\n+++ b/var/spack/repos/builtin/packages/fish/package.py\n@@ -32,8 +32,7 @@\n \n homepage = \"http://fishshell.com/\"\n url = \"http://fishshell.com/files/2.2.0/fish-2.2.0.tar.gz\"\n- list_url = \"http://fishshell.com/files/\"\n- list_depth = 2\n+ list_url = \"http://fishshell.com/\"\n \n version('2.2.0', 'a76339fd14ce2ec229283c53e805faac48c3e99d9e3ede9d82c0554acfc7b77a')\n", "issue": "`spack checksum` finds wrong URLs\nI was having some problems getting spack to find the correct URL for files.\n\nWith these settings\n\n``` python\n homepage = \"http://fishshell.com/\"\n url = \"http://fishshell.com/files/2.2.0/fish-2.2.0.tar.gz\"\n list_url = homepage\n```\n\nI get the following result (with wrong URLs):\n\n``` sh\n$ spack checksum fish\n==> Found 5 versions of fish.\n 2.2.0 http://fishshell.com/fish-2.2.0.tar.gz\n 2.1.2 http://fishshell.com/fish-2.1.2.tar.gz\n 2.1.1 http://fishshell.com/fish-2.1.1.tar.gz\n 2.1.0 http://fishshell.com/fish-2.1.0.tar.gz\n 2.0.0 http://fishshell.com/fish-2.0.0.tar.gz\n\nHow many would you like to checksum? (default is 5, q to abort)\n==> Downloading...\n==> Trying to fetch from http://fishshell.com/fish-2.2.0.tar.gz\n\ncurl: (22) The requested URL returned error: 404 Not Found\n==> Fetching from http://fishshell.com/fish-2.2.0.tar.gz failed.\n==> Error: All fetchers failed for spack-stage-Slflbn\n```\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Fish(Package):\n \"\"\"fish is a smart and user-friendly command line shell for OS X, Linux, and\n the rest of the family.\n \"\"\"\n\n homepage = \"http://fishshell.com/\"\n url = \"http://fishshell.com/files/2.2.0/fish-2.2.0.tar.gz\"\n list_url = \"http://fishshell.com/files/\"\n list_depth = 2\n\n version('2.2.0', 'a76339fd14ce2ec229283c53e805faac48c3e99d9e3ede9d82c0554acfc7b77a')\n\n def install(self, spec, prefix):\n configure('--prefix=%s' % prefix)\n\n make()\n make(\"install\")\n", "path": "var/spack/repos/builtin/packages/fish/package.py"}]}
1,434
208
gh_patches_debug_19106
rasdani/github-patches
git_diff
bridgecrewio__checkov-2319
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AWS_40: failure even when not setting users **Describe the issue** The check fails when executing checkov on Terraform plan's JSON output. **Examples** ``` resource "aws_iam_policy_attachment" "attachment" { ... roles = [...] # no users } ``` JSON: ``` { "address": "aws_iam_policy_attachment.attachment", ... "values": { ... "roles": [ "data-analytics@eng-0" ], "users": [] }, ... ``` The `users` field is set to `[]` in JSON, and the [check implementation](https://github.com/bridgecrewio/checkov/blob/e2538c48df14363d6ed46e5b838e19cc71ba6cbf/checkov/terraform/checks/resource/base_resource_negative_value_check.py#L39-L53) doesn't handle this scenario correctly: https://github.com/bridgecrewio/checkov/blob/e2538c48df14363d6ed46e5b838e19cc71ba6cbf/checkov/terraform/checks/resource/base_resource_negative_value_check.py#L39-L53 **Version (please complete the following information):** ``` > checkov --version 2.0.780 ``` I saw there are tests for the check implementation, but only targeting Terraform source files. Are there tests targeting JSON output from Terraform plan? </issue> <code> [start of checkov/terraform/checks/resource/base_resource_negative_value_check.py] 1 from abc import abstractmethod 2 from collections.abc import Iterable 3 from typing import List, Dict, Any, Optional 4 5 import dpath 6 7 from checkov.common.models.consts import ANY_VALUE 8 from checkov.common.models.enums import CheckResult, CheckCategories 9 from checkov.common.util.type_forcers import force_list 10 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 11 from checkov.terraform.graph_builder.utils import get_referenced_vertices_in_value 12 from checkov.terraform.parser_functions import handle_dynamic_values 13 14 15 class BaseResourceNegativeValueCheck(BaseResourceCheck): 16 def __init__( 17 self, 18 name: str, 19 id: str, 20 categories: "Iterable[CheckCategories]", 21 supported_resources: "Iterable[str]", 22 missing_attribute_result: CheckResult = CheckResult.PASSED, 23 ) -> None: 24 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 25 self.missing_attribute_result = missing_attribute_result 26 27 def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult: 28 handle_dynamic_values(conf) 29 30 excluded_key = self.get_excluded_key() 31 if excluded_key is not None: 32 if dpath.search(conf, excluded_key) != {}: 33 value = dpath.get(conf, excluded_key) 34 if isinstance(value, list) and len(value) == 1: 35 value = value[0] 36 if self.check_excluded_condition(value): 37 return CheckResult.PASSED 38 39 inspected_key = self.get_inspected_key() 40 bad_values = self.get_forbidden_values() 41 if dpath.search(conf, inspected_key) != {}: 42 value = dpath.get(conf, inspected_key) 43 if isinstance(value, list) and len(value) == 1: 44 value = value[0] 45 if get_referenced_vertices_in_value(value=value, aliases={}, resources_types=[]): 46 # we don't provide resources_types as we want to stay provider agnostic 47 return CheckResult.UNKNOWN 48 if value is None: 49 return self.missing_attribute_result 50 if value in bad_values or ANY_VALUE in bad_values: 51 return CheckResult.FAILED 52 else: 53 return CheckResult.PASSED 54 55 return self.missing_attribute_result 56 57 @abstractmethod 58 def get_inspected_key(self) -> str: 59 """ 60 :return: JSONPath syntax path of the checked attribute 61 """ 62 raise NotImplementedError() 63 64 @abstractmethod 65 def get_forbidden_values(self) -> List[Any]: 66 """ 67 Returns a list of vulnerable values for the inspected key, governed by provider best practices 68 """ 69 raise NotImplementedError() 70 71 def get_excluded_key(self) -> Optional[str]: 72 """ 73 :return: JSONPath syntax path of the an attribute that provides exclusion condition for the inspected key 74 """ 75 return None 76 77 def check_excluded_condition(self, value: str) -> bool: 78 """ 79 :param: value: value for excluded_key 80 :return: True if the value should exclude the check from failing if the inspected key has a bad value 81 """ 82 return False 83 84 def get_evaluated_keys(self) -> List[str]: 85 return force_list(self.get_inspected_key()) 86 [end of checkov/terraform/checks/resource/base_resource_negative_value_check.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/base_resource_negative_value_check.py b/checkov/terraform/checks/resource/base_resource_negative_value_check.py --- a/checkov/terraform/checks/resource/base_resource_negative_value_check.py +++ b/checkov/terraform/checks/resource/base_resource_negative_value_check.py @@ -42,11 +42,11 @@ value = dpath.get(conf, inspected_key) if isinstance(value, list) and len(value) == 1: value = value[0] + if value is None or (isinstance(value, list) and not value): + return self.missing_attribute_result if get_referenced_vertices_in_value(value=value, aliases={}, resources_types=[]): # we don't provide resources_types as we want to stay provider agnostic return CheckResult.UNKNOWN - if value is None: - return self.missing_attribute_result if value in bad_values or ANY_VALUE in bad_values: return CheckResult.FAILED else:
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/base_resource_negative_value_check.py b/checkov/terraform/checks/resource/base_resource_negative_value_check.py\n--- a/checkov/terraform/checks/resource/base_resource_negative_value_check.py\n+++ b/checkov/terraform/checks/resource/base_resource_negative_value_check.py\n@@ -42,11 +42,11 @@\n value = dpath.get(conf, inspected_key)\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n+ if value is None or (isinstance(value, list) and not value):\n+ return self.missing_attribute_result\n if get_referenced_vertices_in_value(value=value, aliases={}, resources_types=[]):\n # we don't provide resources_types as we want to stay provider agnostic\n return CheckResult.UNKNOWN\n- if value is None:\n- return self.missing_attribute_result\n if value in bad_values or ANY_VALUE in bad_values:\n return CheckResult.FAILED\n else:\n", "issue": "CKV_AWS_40: failure even when not setting users\n**Describe the issue**\r\nThe check fails when executing checkov on Terraform plan's JSON output.\r\n\r\n**Examples**\r\n```\r\nresource \"aws_iam_policy_attachment\" \"attachment\" {\r\n ...\r\n roles = [...]\r\n # no users\r\n}\r\n```\r\nJSON:\r\n```\r\n{\r\n \"address\": \"aws_iam_policy_attachment.attachment\",\r\n ...\r\n \"values\": {\r\n ...\r\n \"roles\": [\r\n \"data-analytics@eng-0\"\r\n ],\r\n \"users\": []\r\n },\r\n...\r\n```\r\nThe `users` field is set to `[]` in JSON, and the [check implementation](https://github.com/bridgecrewio/checkov/blob/e2538c48df14363d6ed46e5b838e19cc71ba6cbf/checkov/terraform/checks/resource/base_resource_negative_value_check.py#L39-L53) doesn't handle this scenario correctly:\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/e2538c48df14363d6ed46e5b838e19cc71ba6cbf/checkov/terraform/checks/resource/base_resource_negative_value_check.py#L39-L53\r\n\r\n**Version (please complete the following information):**\r\n```\r\n> checkov --version\r\n2.0.780\r\n```\r\n\r\nI saw there are tests for the check implementation, but only targeting Terraform source files. Are there tests targeting JSON output from Terraform plan?\n", "before_files": [{"content": "from abc import abstractmethod\nfrom collections.abc import Iterable\nfrom typing import List, Dict, Any, Optional\n\nimport dpath\n\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.terraform.graph_builder.utils import get_referenced_vertices_in_value\nfrom checkov.terraform.parser_functions import handle_dynamic_values\n\n\nclass BaseResourceNegativeValueCheck(BaseResourceCheck):\n def __init__(\n self,\n name: str,\n id: str,\n categories: \"Iterable[CheckCategories]\",\n supported_resources: \"Iterable[str]\",\n missing_attribute_result: CheckResult = CheckResult.PASSED,\n ) -> None:\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n self.missing_attribute_result = missing_attribute_result\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n handle_dynamic_values(conf)\n\n excluded_key = self.get_excluded_key()\n if excluded_key is not None:\n if dpath.search(conf, excluded_key) != {}:\n value = dpath.get(conf, excluded_key)\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n if self.check_excluded_condition(value):\n return CheckResult.PASSED\n\n inspected_key = self.get_inspected_key()\n bad_values = self.get_forbidden_values()\n if dpath.search(conf, inspected_key) != {}:\n value = dpath.get(conf, inspected_key)\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n if get_referenced_vertices_in_value(value=value, aliases={}, resources_types=[]):\n # we don't provide resources_types as we want to stay provider agnostic\n return CheckResult.UNKNOWN\n if value is None:\n return self.missing_attribute_result\n if value in bad_values or ANY_VALUE in bad_values:\n return CheckResult.FAILED\n else:\n return CheckResult.PASSED\n\n return self.missing_attribute_result\n\n @abstractmethod\n def get_inspected_key(self) -> str:\n \"\"\"\n :return: JSONPath syntax path of the checked attribute\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def get_forbidden_values(self) -> List[Any]:\n \"\"\"\n Returns a list of vulnerable values for the inspected key, governed by provider best practices\n \"\"\"\n raise NotImplementedError()\n\n def get_excluded_key(self) -> Optional[str]:\n \"\"\"\n :return: JSONPath syntax path of the an attribute that provides exclusion condition for the inspected key\n \"\"\"\n return None\n\n def check_excluded_condition(self, value: str) -> bool:\n \"\"\"\n :param: value: value for excluded_key\n :return: True if the value should exclude the check from failing if the inspected key has a bad value\n \"\"\"\n return False\n\n def get_evaluated_keys(self) -> List[str]:\n return force_list(self.get_inspected_key())\n", "path": "checkov/terraform/checks/resource/base_resource_negative_value_check.py"}]}
1,735
219
gh_patches_debug_30698
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-602
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Show latest feedback in dashboard Show admins and content creators the latest feedback from app users in dashboard. This should be a list of the last ~5 messages. In some cases, the feedback only contains a thumbs up or down, in other cases it can contain a message. The title of the page or event concerned should be displayed as well and linked to the editing page. Additional option: Add link that sends the message string to translate.google.com or deepl.com for translation. This can be useful if the feedback is given in a language the back end user does not understand. It is perfectly fine, if the link opens the translation website in a new tab. No need to fetch a translation via the API. </issue> <code> [start of src/cms/views/dashboard/admin_dashboard_view.py] 1 from django.contrib.auth.decorators import login_required 2 from django.shortcuts import render 3 from django.utils.decorators import method_decorator 4 from django.views.generic import TemplateView 5 6 from ...decorators import staff_required 7 8 9 @method_decorator(login_required, name="dispatch") 10 @method_decorator(staff_required, name="dispatch") 11 class AdminDashboardView(TemplateView): 12 """ 13 View for the admin dashboard 14 """ 15 16 #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`) 17 template_name = "dashboard/admin_dashboard.html" 18 #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`) 19 base_context = {"current_menu_item": "admin_dashboard"} 20 21 def get(self, request, *args, **kwargs): 22 """ 23 Render admin dashboard 24 25 :param request: Object representing the user call 26 :type request: ~django.http.HttpRequest 27 28 :param args: The supplied arguments 29 :type args: list 30 31 :param kwargs: The supplied keyword arguments 32 :type kwargs: dict 33 34 :return: The rendered template response 35 :rtype: ~django.template.response.TemplateResponse 36 """ 37 38 val = "To be defined" 39 return render(request, self.template_name, {"key": val, **self.base_context}) 40 [end of src/cms/views/dashboard/admin_dashboard_view.py] [start of src/cms/views/dashboard/dashboard_view.py] 1 import html 2 from urllib.parse import urlparse 3 import feedparser 4 5 from django.contrib.auth.decorators import login_required 6 from django.shortcuts import render 7 from django.utils import translation 8 from django.utils.decorators import method_decorator 9 from django.views.generic import TemplateView 10 11 from backend.settings import RSS_FEED_URLS 12 from ...decorators import region_permission_required 13 14 15 @method_decorator(login_required, name="dispatch") 16 @method_decorator(region_permission_required, name="dispatch") 17 class DashboardView(TemplateView): 18 """ 19 View for the region dashboard 20 """ 21 22 #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`) 23 template_name = "dashboard/dashboard.html" 24 #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`) 25 base_context = {"current_menu_item": "region_dashboard"} 26 27 def get(self, request, *args, **kwargs): 28 """ 29 Render the region dashboard 30 31 :param request: Object representing the user call 32 :type request: ~django.http.HttpRequest 33 34 :param args: The supplied arguments 35 :type args: list 36 37 :param kwargs: The supplied keyword arguments 38 :type kwargs: dict 39 40 :return: The rendered template response 41 :rtype: ~django.template.response.TemplateResponse 42 """ 43 44 val = "To be defined" 45 language_code = translation.get_language() 46 feed = feedparser.parse(RSS_FEED_URLS[language_code]) 47 # select five most recent feeds 48 feed["entries"] = feed["entries"][:5] 49 # decode html entities like dash and split after line break 50 for entry in feed["entries"]: 51 entry["summary"] = html.unescape(entry["summary"]).split("\n")[0] 52 domain = urlparse(RSS_FEED_URLS["home-page"]).netloc 53 return render( 54 request, 55 self.template_name, 56 { 57 "key": val, 58 **self.base_context, 59 "feed": feed, 60 "home_page": RSS_FEED_URLS["home-page"], 61 "domain": domain, 62 }, 63 ) 64 [end of src/cms/views/dashboard/dashboard_view.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cms/views/dashboard/admin_dashboard_view.py b/src/cms/views/dashboard/admin_dashboard_view.py --- a/src/cms/views/dashboard/admin_dashboard_view.py +++ b/src/cms/views/dashboard/admin_dashboard_view.py @@ -4,6 +4,7 @@ from django.views.generic import TemplateView from ...decorators import staff_required +from ...models import Feedback @method_decorator(login_required, name="dispatch") @@ -34,6 +35,14 @@ :return: The rendered template response :rtype: ~django.template.response.TemplateResponse """ - - val = "To be defined" - return render(request, self.template_name, {"key": val, **self.base_context}) + all_feedback = Feedback.objects.filter(is_technical=True)[:5] + + return render( + request, + self.template_name, + { + "current_menu_item": "admin_feedback", + "all_feedback": all_feedback, + **self.base_context, + }, + ) diff --git a/src/cms/views/dashboard/dashboard_view.py b/src/cms/views/dashboard/dashboard_view.py --- a/src/cms/views/dashboard/dashboard_view.py +++ b/src/cms/views/dashboard/dashboard_view.py @@ -41,7 +41,6 @@ :rtype: ~django.template.response.TemplateResponse """ - val = "To be defined" language_code = translation.get_language() feed = feedparser.parse(RSS_FEED_URLS[language_code]) # select five most recent feeds @@ -54,7 +53,6 @@ request, self.template_name, { - "key": val, **self.base_context, "feed": feed, "home_page": RSS_FEED_URLS["home-page"],
{"golden_diff": "diff --git a/src/cms/views/dashboard/admin_dashboard_view.py b/src/cms/views/dashboard/admin_dashboard_view.py\n--- a/src/cms/views/dashboard/admin_dashboard_view.py\n+++ b/src/cms/views/dashboard/admin_dashboard_view.py\n@@ -4,6 +4,7 @@\n from django.views.generic import TemplateView\n \n from ...decorators import staff_required\n+from ...models import Feedback\n \n \n @method_decorator(login_required, name=\"dispatch\")\n@@ -34,6 +35,14 @@\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n-\n- val = \"To be defined\"\n- return render(request, self.template_name, {\"key\": val, **self.base_context})\n+ all_feedback = Feedback.objects.filter(is_technical=True)[:5]\n+\n+ return render(\n+ request,\n+ self.template_name,\n+ {\n+ \"current_menu_item\": \"admin_feedback\",\n+ \"all_feedback\": all_feedback,\n+ **self.base_context,\n+ },\n+ )\ndiff --git a/src/cms/views/dashboard/dashboard_view.py b/src/cms/views/dashboard/dashboard_view.py\n--- a/src/cms/views/dashboard/dashboard_view.py\n+++ b/src/cms/views/dashboard/dashboard_view.py\n@@ -41,7 +41,6 @@\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n \n- val = \"To be defined\"\n language_code = translation.get_language()\n feed = feedparser.parse(RSS_FEED_URLS[language_code])\n # select five most recent feeds\n@@ -54,7 +53,6 @@\n request,\n self.template_name,\n {\n- \"key\": val,\n **self.base_context,\n \"feed\": feed,\n \"home_page\": RSS_FEED_URLS[\"home-page\"],\n", "issue": "Show latest feedback in dashboard\nShow admins and content creators the latest feedback from app users in dashboard. This should be a list of the last ~5 messages. In some cases, the feedback only contains a thumbs up or down, in other cases it can contain a message. The title of the page or event concerned should be displayed as well and linked to the editing page.\r\n\r\nAdditional option: Add link that sends the message string to translate.google.com or deepl.com for translation. This can be useful if the feedback is given in a language the back end user does not understand. It is perfectly fine, if the link opens the translation website in a new tab. No need to fetch a translation via the API.\n", "before_files": [{"content": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\n\nfrom ...decorators import staff_required\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(staff_required, name=\"dispatch\")\nclass AdminDashboardView(TemplateView):\n \"\"\"\n View for the admin dashboard\n \"\"\"\n\n #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n template_name = \"dashboard/admin_dashboard.html\"\n #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)\n base_context = {\"current_menu_item\": \"admin_dashboard\"}\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Render admin dashboard\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n val = \"To be defined\"\n return render(request, self.template_name, {\"key\": val, **self.base_context})\n", "path": "src/cms/views/dashboard/admin_dashboard_view.py"}, {"content": "import html\nfrom urllib.parse import urlparse\nimport feedparser\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.utils import translation\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\n\nfrom backend.settings import RSS_FEED_URLS\nfrom ...decorators import region_permission_required\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(region_permission_required, name=\"dispatch\")\nclass DashboardView(TemplateView):\n \"\"\"\n View for the region dashboard\n \"\"\"\n\n #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n template_name = \"dashboard/dashboard.html\"\n #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)\n base_context = {\"current_menu_item\": \"region_dashboard\"}\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Render the region dashboard\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n val = \"To be defined\"\n language_code = translation.get_language()\n feed = feedparser.parse(RSS_FEED_URLS[language_code])\n # select five most recent feeds\n feed[\"entries\"] = feed[\"entries\"][:5]\n # decode html entities like dash and split after line break\n for entry in feed[\"entries\"]:\n entry[\"summary\"] = html.unescape(entry[\"summary\"]).split(\"\\n\")[0]\n domain = urlparse(RSS_FEED_URLS[\"home-page\"]).netloc\n return render(\n request,\n self.template_name,\n {\n \"key\": val,\n **self.base_context,\n \"feed\": feed,\n \"home_page\": RSS_FEED_URLS[\"home-page\"],\n \"domain\": domain,\n },\n )\n", "path": "src/cms/views/dashboard/dashboard_view.py"}]}
1,611
381
gh_patches_debug_25733
rasdani/github-patches
git_diff
Mailu__Mailu-731
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Increase attachment size limit Hello, How can web change the max file limit of attachment in Roundcube ? Version 1.5 </issue> <code> [start of webmails/rainloop/start.py] 1 #!/usr/bin/python3 2 3 import jinja2 4 import os 5 import shutil 6 7 convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ)) 8 9 # Actual startup script 10 os.environ["FRONT_ADDRESS"] = os.environ.get("FRONT_ADDRESS", "front") 11 os.environ["IMAP_ADDRESS"] = os.environ.get("IMAP_ADDRESS", "imap") 12 13 base = "/data/_data_/_default_/" 14 shutil.rmtree(base + "domains/", ignore_errors=True) 15 os.makedirs(base + "domains", exist_ok=True) 16 os.makedirs(base + "configs", exist_ok=True) 17 18 convert("/default.ini", "/data/_data_/_default_/domains/default.ini") 19 convert("/config.ini", "/data/_data_/_default_/configs/config.ini") 20 21 os.system("chown -R www-data:www-data /data") 22 23 os.execv("/usr/local/bin/apache2-foreground", ["apache2-foreground"]) 24 25 [end of webmails/rainloop/start.py] [start of webmails/roundcube/start.py] 1 #!/usr/bin/python3 2 3 import os 4 5 # Fix some permissions 6 os.system("mkdir -p /data/gpg") 7 os.system("chown -R www-data:www-data /data") 8 9 # Run apache 10 os.execv("/usr/local/bin/apache2-foreground", ["apache2-foreground"]) [end of webmails/roundcube/start.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/webmails/rainloop/start.py b/webmails/rainloop/start.py --- a/webmails/rainloop/start.py +++ b/webmails/rainloop/start.py @@ -10,6 +10,8 @@ os.environ["FRONT_ADDRESS"] = os.environ.get("FRONT_ADDRESS", "front") os.environ["IMAP_ADDRESS"] = os.environ.get("IMAP_ADDRESS", "imap") +os.environ["MAX_FILESIZE"] = str(int(int(os.environ.get("MESSAGE_SIZE_LIMIT"))*0.66/1048576)) + base = "/data/_data_/_default_/" shutil.rmtree(base + "domains/", ignore_errors=True) os.makedirs(base + "domains", exist_ok=True) @@ -17,6 +19,7 @@ convert("/default.ini", "/data/_data_/_default_/domains/default.ini") convert("/config.ini", "/data/_data_/_default_/configs/config.ini") +convert("/php.ini", "/usr/local/etc/php/conf.d/rainloop.ini") os.system("chown -R www-data:www-data /data") diff --git a/webmails/roundcube/start.py b/webmails/roundcube/start.py --- a/webmails/roundcube/start.py +++ b/webmails/roundcube/start.py @@ -1,6 +1,13 @@ #!/usr/bin/python3 import os +import jinja2 + +convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ)) + +os.environ["MAX_FILESIZE"] = str(int(int(os.environ.get("MESSAGE_SIZE_LIMIT"))*0.66/1048576)) + +convert("/php.ini", "/usr/local/etc/php/conf.d/roundcube.ini") # Fix some permissions os.system("mkdir -p /data/gpg")
{"golden_diff": "diff --git a/webmails/rainloop/start.py b/webmails/rainloop/start.py\n--- a/webmails/rainloop/start.py\n+++ b/webmails/rainloop/start.py\n@@ -10,6 +10,8 @@\n os.environ[\"FRONT_ADDRESS\"] = os.environ.get(\"FRONT_ADDRESS\", \"front\")\n os.environ[\"IMAP_ADDRESS\"] = os.environ.get(\"IMAP_ADDRESS\", \"imap\")\n \n+os.environ[\"MAX_FILESIZE\"] = str(int(int(os.environ.get(\"MESSAGE_SIZE_LIMIT\"))*0.66/1048576))\n+\n base = \"/data/_data_/_default_/\"\n shutil.rmtree(base + \"domains/\", ignore_errors=True)\n os.makedirs(base + \"domains\", exist_ok=True)\n@@ -17,6 +19,7 @@\n \n convert(\"/default.ini\", \"/data/_data_/_default_/domains/default.ini\")\n convert(\"/config.ini\", \"/data/_data_/_default_/configs/config.ini\")\n+convert(\"/php.ini\", \"/usr/local/etc/php/conf.d/rainloop.ini\")\n \n os.system(\"chown -R www-data:www-data /data\")\n \ndiff --git a/webmails/roundcube/start.py b/webmails/roundcube/start.py\n--- a/webmails/roundcube/start.py\n+++ b/webmails/roundcube/start.py\n@@ -1,6 +1,13 @@\n #!/usr/bin/python3\n \n import os\n+import jinja2\n+\n+convert = lambda src, dst: open(dst, \"w\").write(jinja2.Template(open(src).read()).render(**os.environ))\n+\n+os.environ[\"MAX_FILESIZE\"] = str(int(int(os.environ.get(\"MESSAGE_SIZE_LIMIT\"))*0.66/1048576))\n+\n+convert(\"/php.ini\", \"/usr/local/etc/php/conf.d/roundcube.ini\")\n \n # Fix some permissions\n os.system(\"mkdir -p /data/gpg\")\n", "issue": "Increase attachment size limit\nHello, \r\n\r\nHow can web change the max file limit of attachment in Roundcube ?\r\n\r\nVersion 1.5\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport jinja2\nimport os\nimport shutil\n\nconvert = lambda src, dst: open(dst, \"w\").write(jinja2.Template(open(src).read()).render(**os.environ))\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = os.environ.get(\"FRONT_ADDRESS\", \"front\")\nos.environ[\"IMAP_ADDRESS\"] = os.environ.get(\"IMAP_ADDRESS\", \"imap\")\n\nbase = \"/data/_data_/_default_/\"\nshutil.rmtree(base + \"domains/\", ignore_errors=True)\nos.makedirs(base + \"domains\", exist_ok=True)\nos.makedirs(base + \"configs\", exist_ok=True)\n\nconvert(\"/default.ini\", \"/data/_data_/_default_/domains/default.ini\")\nconvert(\"/config.ini\", \"/data/_data_/_default_/configs/config.ini\")\n\nos.system(\"chown -R www-data:www-data /data\")\n\nos.execv(\"/usr/local/bin/apache2-foreground\", [\"apache2-foreground\"])\n\n", "path": "webmails/rainloop/start.py"}, {"content": "#!/usr/bin/python3\n\nimport os\n\n# Fix some permissions\nos.system(\"mkdir -p /data/gpg\")\nos.system(\"chown -R www-data:www-data /data\")\n\n# Run apache\nos.execv(\"/usr/local/bin/apache2-foreground\", [\"apache2-foreground\"])", "path": "webmails/roundcube/start.py"}]}
919
418
gh_patches_debug_9841
rasdani/github-patches
git_diff
bridgecrewio__checkov-993
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Checkov logs to stdout making the json un-parseable **Describe the bug** When you run checkov with the `-o json` and `LOG_LEVEL=WARNING` the logs are printed to `stdout` together with the json object. This way it's hard and error prone to parse the json output. **Expected behavior** At least in `-o json` (or any parseable output) mode It should use stderr to log to and use stdout only for the parseable output and log everything else to stderr. </issue> <code> [start of checkov/logging_init.py] 1 import sys 2 3 import logging 4 import os 5 6 7 def init(): 8 LOG_LEVEL = os.environ.get('LOG_LEVEL', 'WARNING').upper() 9 logging.basicConfig(level=LOG_LEVEL) 10 logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s") 11 rootLogger = logging.getLogger() 12 consoleHandler = logging.StreamHandler(sys.stdout) 13 consoleHandler.setFormatter(logFormatter) 14 consoleHandler.setLevel(LOG_LEVEL) 15 rootLogger.addHandler(consoleHandler) 16 logging.getLogger("urllib3").setLevel(logging.ERROR) 17 logging.getLogger("urllib3.connectionpool").setLevel(logging.ERROR) 18 logging.getLogger("urllib3.connectionpool").propagate = False 19 logging.getLogger("urllib3").propagate = False 20 [end of checkov/logging_init.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/logging_init.py b/checkov/logging_init.py --- a/checkov/logging_init.py +++ b/checkov/logging_init.py @@ -9,7 +9,7 @@ logging.basicConfig(level=LOG_LEVEL) logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s") rootLogger = logging.getLogger() - consoleHandler = logging.StreamHandler(sys.stdout) + consoleHandler = logging.StreamHandler(sys.stderr) consoleHandler.setFormatter(logFormatter) consoleHandler.setLevel(LOG_LEVEL) rootLogger.addHandler(consoleHandler)
{"golden_diff": "diff --git a/checkov/logging_init.py b/checkov/logging_init.py\n--- a/checkov/logging_init.py\n+++ b/checkov/logging_init.py\n@@ -9,7 +9,7 @@\n logging.basicConfig(level=LOG_LEVEL)\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n rootLogger = logging.getLogger()\n- consoleHandler = logging.StreamHandler(sys.stdout)\n+ consoleHandler = logging.StreamHandler(sys.stderr)\n consoleHandler.setFormatter(logFormatter)\n consoleHandler.setLevel(LOG_LEVEL)\n rootLogger.addHandler(consoleHandler)\n", "issue": "Checkov logs to stdout making the json un-parseable\n**Describe the bug**\r\nWhen you run checkov with the `-o json` and `LOG_LEVEL=WARNING` the logs are printed to `stdout` together with the json object. This way it's hard and error prone to parse the json output.\r\n\r\n**Expected behavior**\r\nAt least in `-o json` (or any parseable output) mode It should use stderr to log to and use stdout only for the parseable output and log everything else to stderr.\r\n\n", "before_files": [{"content": "import sys\n\nimport logging\nimport os\n\n\ndef init():\n LOG_LEVEL = os.environ.get('LOG_LEVEL', 'WARNING').upper()\n logging.basicConfig(level=LOG_LEVEL)\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n rootLogger = logging.getLogger()\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logFormatter)\n consoleHandler.setLevel(LOG_LEVEL)\n rootLogger.addHandler(consoleHandler)\n logging.getLogger(\"urllib3\").setLevel(logging.ERROR)\n logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.ERROR)\n logging.getLogger(\"urllib3.connectionpool\").propagate = False\n logging.getLogger(\"urllib3\").propagate = False\n", "path": "checkov/logging_init.py"}]}
845
138
gh_patches_debug_3547
rasdani/github-patches
git_diff
cisagov__manage.get.gov-199
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reconfigure OIDC logout to send client_id Login.gov recently changed their logout method to take `client_id` instead of the previous parameter `id_token_hint`. We need to change our code to match. ![Screen Shot 2022-10-20 at 15 26 58](https://user-images.githubusercontent.com/443389/197051545-59d60ba9-af91-42d7-8d00-d515c9210d4c.png) @SSPJ knows this code the best of any of us. </issue> <code> [start of src/djangooidc/views.py] 1 # coding: utf-8 2 3 import logging 4 5 from django.conf import settings 6 from django.contrib.auth import logout as auth_logout 7 from django.contrib.auth import authenticate, login 8 from django.http import HttpResponseRedirect 9 from django.shortcuts import redirect, render 10 from urllib.parse import parse_qs, urlencode 11 12 from djangooidc.oidc import Client 13 from djangooidc import exceptions as o_e 14 15 16 logger = logging.getLogger(__name__) 17 18 try: 19 # Initialize provider using pyOICD 20 OP = getattr(settings, "OIDC_ACTIVE_PROVIDER") 21 CLIENT = Client(OP) 22 logger.debug("client initialized %s" % CLIENT) 23 except Exception as err: 24 CLIENT = None # type: ignore 25 logger.warning(err) 26 logger.warning("Unable to configure OpenID Connect provider. Users cannot log in.") 27 28 29 def error_page(request, error): 30 """Display a sensible message and log the error.""" 31 logger.error(error) 32 if isinstance(error, o_e.AuthenticationFailed): 33 return render( 34 request, 35 "401.html", 36 context={ 37 "friendly_message": error.friendly_message, 38 "log_identifier": error.locator, 39 }, 40 status=401, 41 ) 42 if isinstance(error, o_e.InternalError): 43 return render( 44 request, 45 "500.html", 46 context={ 47 "friendly_message": error.friendly_message, 48 "log_identifier": error.locator, 49 }, 50 status=500, 51 ) 52 if isinstance(error, Exception): 53 return render(request, "500.html", status=500) 54 55 56 def openid(request): 57 """Redirect the user to an authentication provider (OP).""" 58 request.session["next"] = request.GET.get("next", "/") 59 60 try: 61 return CLIENT.create_authn_request(request.session) 62 except Exception as err: 63 return error_page(request, err) 64 65 66 def login_callback(request): 67 """Analyze the token returned by the authentication provider (OP).""" 68 try: 69 query = parse_qs(request.GET.urlencode()) 70 userinfo = CLIENT.callback(query, request.session) 71 user = authenticate(request=request, **userinfo) 72 if user: 73 login(request, user) 74 logger.info("Successfully logged in user %s" % user) 75 return redirect(request.session.get("next", "/")) 76 else: 77 raise o_e.BannedUser() 78 except Exception as err: 79 return error_page(request, err) 80 81 82 def logout(request, next_page=None): 83 """Redirect the user to the authentication provider (OP) logout page.""" 84 try: 85 username = request.user.username 86 request_args = { 87 # it is perfectly fine to send the token, even if it is expired 88 "id_token_hint": request.session["id_token_raw"], 89 "state": request.session["state"], 90 } 91 if ( 92 "post_logout_redirect_uris" in CLIENT.registration_response.keys() 93 and len(CLIENT.registration_response["post_logout_redirect_uris"]) > 0 94 ): 95 request_args.update( 96 { 97 "post_logout_redirect_uri": CLIENT.registration_response[ 98 "post_logout_redirect_uris" 99 ][0] 100 } 101 ) 102 103 url = CLIENT.provider_info["end_session_endpoint"] 104 url += "?" + urlencode(request_args) 105 return HttpResponseRedirect(url) 106 except Exception as err: 107 return error_page(request, err) 108 finally: 109 # Always remove Django session stuff - even if not logged out from OP. 110 # Don't wait for the callback as it may never come. 111 auth_logout(request) 112 logger.info("Successfully logged out user %s" % username) 113 next_page = getattr(settings, "LOGOUT_REDIRECT_URL", None) 114 if next_page: 115 request.session["next"] = next_page 116 117 118 def logout_callback(request): 119 """Simple redirection view: after logout, redirect to `next`.""" 120 next = request.session.get("next", "/") 121 return redirect(next) 122 [end of src/djangooidc/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/djangooidc/views.py b/src/djangooidc/views.py --- a/src/djangooidc/views.py +++ b/src/djangooidc/views.py @@ -84,8 +84,7 @@ try: username = request.user.username request_args = { - # it is perfectly fine to send the token, even if it is expired - "id_token_hint": request.session["id_token_raw"], + "client_id": CLIENT.client_id, "state": request.session["state"], } if (
{"golden_diff": "diff --git a/src/djangooidc/views.py b/src/djangooidc/views.py\n--- a/src/djangooidc/views.py\n+++ b/src/djangooidc/views.py\n@@ -84,8 +84,7 @@\n try:\n username = request.user.username\n request_args = {\n- # it is perfectly fine to send the token, even if it is expired\n- \"id_token_hint\": request.session[\"id_token_raw\"],\n+ \"client_id\": CLIENT.client_id,\n \"state\": request.session[\"state\"],\n }\n if (\n", "issue": "Reconfigure OIDC logout to send client_id\nLogin.gov recently changed their logout method to take `client_id` instead of the previous parameter `id_token_hint`. We need to change our code to match.\r\n\r\n![Screen Shot 2022-10-20 at 15 26 58](https://user-images.githubusercontent.com/443389/197051545-59d60ba9-af91-42d7-8d00-d515c9210d4c.png)\r\n\r\n@SSPJ knows this code the best of any of us.\n", "before_files": [{"content": "# coding: utf-8\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom urllib.parse import parse_qs, urlencode\n\nfrom djangooidc.oidc import Client\nfrom djangooidc import exceptions as o_e\n\n\nlogger = logging.getLogger(__name__)\n\ntry:\n # Initialize provider using pyOICD\n OP = getattr(settings, \"OIDC_ACTIVE_PROVIDER\")\n CLIENT = Client(OP)\n logger.debug(\"client initialized %s\" % CLIENT)\nexcept Exception as err:\n CLIENT = None # type: ignore\n logger.warning(err)\n logger.warning(\"Unable to configure OpenID Connect provider. Users cannot log in.\")\n\n\ndef error_page(request, error):\n \"\"\"Display a sensible message and log the error.\"\"\"\n logger.error(error)\n if isinstance(error, o_e.AuthenticationFailed):\n return render(\n request,\n \"401.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=401,\n )\n if isinstance(error, o_e.InternalError):\n return render(\n request,\n \"500.html\",\n context={\n \"friendly_message\": error.friendly_message,\n \"log_identifier\": error.locator,\n },\n status=500,\n )\n if isinstance(error, Exception):\n return render(request, \"500.html\", status=500)\n\n\ndef openid(request):\n \"\"\"Redirect the user to an authentication provider (OP).\"\"\"\n request.session[\"next\"] = request.GET.get(\"next\", \"/\")\n\n try:\n return CLIENT.create_authn_request(request.session)\n except Exception as err:\n return error_page(request, err)\n\n\ndef login_callback(request):\n \"\"\"Analyze the token returned by the authentication provider (OP).\"\"\"\n try:\n query = parse_qs(request.GET.urlencode())\n userinfo = CLIENT.callback(query, request.session)\n user = authenticate(request=request, **userinfo)\n if user:\n login(request, user)\n logger.info(\"Successfully logged in user %s\" % user)\n return redirect(request.session.get(\"next\", \"/\"))\n else:\n raise o_e.BannedUser()\n except Exception as err:\n return error_page(request, err)\n\n\ndef logout(request, next_page=None):\n \"\"\"Redirect the user to the authentication provider (OP) logout page.\"\"\"\n try:\n username = request.user.username\n request_args = {\n # it is perfectly fine to send the token, even if it is expired\n \"id_token_hint\": request.session[\"id_token_raw\"],\n \"state\": request.session[\"state\"],\n }\n if (\n \"post_logout_redirect_uris\" in CLIENT.registration_response.keys()\n and len(CLIENT.registration_response[\"post_logout_redirect_uris\"]) > 0\n ):\n request_args.update(\n {\n \"post_logout_redirect_uri\": CLIENT.registration_response[\n \"post_logout_redirect_uris\"\n ][0]\n }\n )\n\n url = CLIENT.provider_info[\"end_session_endpoint\"]\n url += \"?\" + urlencode(request_args)\n return HttpResponseRedirect(url)\n except Exception as err:\n return error_page(request, err)\n finally:\n # Always remove Django session stuff - even if not logged out from OP.\n # Don't wait for the callback as it may never come.\n auth_logout(request)\n logger.info(\"Successfully logged out user %s\" % username)\n next_page = getattr(settings, \"LOGOUT_REDIRECT_URL\", None)\n if next_page:\n request.session[\"next\"] = next_page\n\n\ndef logout_callback(request):\n \"\"\"Simple redirection view: after logout, redirect to `next`.\"\"\"\n next = request.session.get(\"next\", \"/\")\n return redirect(next)\n", "path": "src/djangooidc/views.py"}]}
1,759
124