problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff
---|---|---|---|---|---|---|---|---
stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k
gh_patches_debug_20827 | rasdani/github-patches | git_diff | shuup__shuup-742
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
System check to verify Parler sanity
Shuup should check that the Parler configuration is sane before starting.
@JsseL and @juhakujala puzzled over an unrelated exception (`'shuup.admin.modules.services.behavior_form_part.BehaviorFormSet object' has no attribute 'empty_form'`) for a while – turns out it was an `AttributeError` ([which, as we unfortunately know, are hidden within `@property`s](https://github.com/shuup/shuup/blob/5584ebf912bae415fe367ea0c00ad4c5cff49244/shuup/utils/form_group.py#L86-L100)) within `FormSet.empty_form` calls that happens due to `PARLER_DEFAULT_LANGUAGE_CODE` being undefined:
```
Traceback (most recent call last):
File "~/django/forms/formsets.py", line 187, in empty_form
empty_permitted=True,
File "~/shuup/admin/modules/services/behavior_form_part.py", line 49, in form
kwargs.setdefault("default_language", settings.PARLER_DEFAULT_LANGUAGE_CODE)
File "~/django/conf/__init__.py", line 49, in __getattr__
return getattr(self._wrapped, name)
AttributeError: 'Settings' object has no attribute 'PARLER_DEFAULT_LANGUAGE_CODE'
```
My suggestion is to add a simple system check in [ShuupCoreAppConfig.ready()](https://github.com/shuup/shuup/blob/5584ebf912bae415fe367ea0c00ad4c5cff49244/shuup/core/__init__.py#L11) that throws an exception if some of the Parler settings (`PARLER_DEFAULT_LANGUAGE_CODE` and `PARLER_LANGUAGES`) are unset -- or perhaps it could automatically derive them based on the Django `LANGUAGES` setting, as "sane defaults" go?
</issue>
<code>
[start of shuup/core/__init__.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
5 #
6 # This source code is licensed under the AGPLv3 license found in the
7 # LICENSE file in the root directory of this source tree.
8 from shuup.apps import AppConfig
9
10
11 class ShuupCoreAppConfig(AppConfig):
12 name = "shuup.core"
13 verbose_name = "Shuup Core"
14 label = "shuup" # Use "shuup" as app_label instead of "core"
15 required_installed_apps = (
16 "django.contrib.auth",
17 "django.contrib.contenttypes",
18 "easy_thumbnails",
19 "filer",
20 )
21 provides = {
22 "api_populator": [
23 "shuup.core.api:populate_core_api"
24 ],
25 "pricing_module": [
26 "shuup.core.pricing.default_pricing:DefaultPricingModule"
27 ],
28 }
29
30
31 default_app_config = "shuup.core.ShuupCoreAppConfig"
32
[end of shuup/core/__init__.py]
[start of shuup/core/excs.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
5 #
6 # This source code is licensed under the AGPLv3 license found in the
7 # LICENSE file in the root directory of this source tree.
8 from shuup.utils.excs import Problem
9
10
11 class ImmutabilityError(ValueError):
12 pass
13
14
15 class NoProductsToShipException(Exception):
16 pass
17
18
19 class NoPaymentToCreateException(Exception):
20 pass
21
22
23 class NoRefundToCreateException(Exception):
24 pass
25
26
27 class RefundExceedsAmountException(Exception):
28 pass
29
30
31 class InvalidRefundAmountException(Exception):
32 pass
33
34
35 class ProductNotOrderableProblem(Problem):
36 pass
37
38
39 class ProductNotVisibleProblem(Problem):
40 pass
41
42
43 class ImpossibleProductModeException(ValueError):
44 def __init__(self, message, code=None):
45 super(ImpossibleProductModeException, self).__init__(message)
46 self.code = code
47
[end of shuup/core/excs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/shuup/core/__init__.py b/shuup/core/__init__.py
--- a/shuup/core/__init__.py
+++ b/shuup/core/__init__.py
@@ -6,6 +6,7 @@
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.apps import AppConfig
+from shuup.core.excs import MissingSettingException
class ShuupCoreAppConfig(AppConfig):
@@ -27,5 +28,12 @@
],
}
+ def ready(self):
+ from django.conf import settings
+ if not getattr(settings, "PARLER_DEFAULT_LANGUAGE_CODE", None):
+ raise MissingSettingException("PARLER_DEFAULT_LANGUAGE_CODE must be set.")
+ if not getattr(settings, "PARLER_LANGUAGES", None):
+ raise MissingSettingException("PARLER_LANGUAGES must be set.")
+
default_app_config = "shuup.core.ShuupCoreAppConfig"
diff --git a/shuup/core/excs.py b/shuup/core/excs.py
--- a/shuup/core/excs.py
+++ b/shuup/core/excs.py
@@ -32,6 +32,10 @@
pass
+class MissingSettingException(Exception):
+ pass
+
+
class ProductNotOrderableProblem(Problem):
pass
| {"golden_diff": "diff --git a/shuup/core/__init__.py b/shuup/core/__init__.py\n--- a/shuup/core/__init__.py\n+++ b/shuup/core/__init__.py\n@@ -6,6 +6,7 @@\n # This source code is licensed under the AGPLv3 license found in the\n # LICENSE file in the root directory of this source tree.\n from shuup.apps import AppConfig\n+from shuup.core.excs import MissingSettingException\n \n \n class ShuupCoreAppConfig(AppConfig):\n@@ -27,5 +28,12 @@\n ],\n }\n \n+ def ready(self):\n+ from django.conf import settings\n+ if not getattr(settings, \"PARLER_DEFAULT_LANGUAGE_CODE\", None):\n+ raise MissingSettingException(\"PARLER_DEFAULT_LANGUAGE_CODE must be set.\")\n+ if not getattr(settings, \"PARLER_LANGUAGES\", None):\n+ raise MissingSettingException(\"PARLER_LANGUAGES must be set.\")\n+\n \n default_app_config = \"shuup.core.ShuupCoreAppConfig\"\ndiff --git a/shuup/core/excs.py b/shuup/core/excs.py\n--- a/shuup/core/excs.py\n+++ b/shuup/core/excs.py\n@@ -32,6 +32,10 @@\n pass\n \n \n+class MissingSettingException(Exception):\n+ pass\n+\n+\n class ProductNotOrderableProblem(Problem):\n pass\n", "issue": "System check to verify Parler sanity\nShuup should check that the Parler configuration is sane before starting.\n\n@JsseL and @juhakujala puzzled over an unrelated exception (`'shuup.admin.modules.services.behavior_form_part.BehaviorFormSet object' has no attribute 'empty_form'`) for a while \u2013 turns out it was an `AttributeError` ([which, as we unfortunately know, are hidden within `@property`s](https://github.com/shuup/shuup/blob/5584ebf912bae415fe367ea0c00ad4c5cff49244/shuup/utils/form_group.py#L86-L100)) within `FormSet.empty_form` calls that happens due to `PARLER_DEFAULT_LANGUAGE_CODE` being undefined:\n\n```\nTraceback (most recent call last):\n File \"~/django/forms/formsets.py\", line 187, in empty_form\n empty_permitted=True,\n File \"~/shuup/admin/modules/services/behavior_form_part.py\", line 49, in form\n kwargs.setdefault(\"default_language\", settings.PARLER_DEFAULT_LANGUAGE_CODE)\n File \"~/django/conf/__init__.py\", line 49, in __getattr__\n return getattr(self._wrapped, name)\nAttributeError: 'Settings' object has no attribute 'PARLER_DEFAULT_LANGUAGE_CODE'\n```\n\nMy suggestion is to add a simple system check in [ShuupCoreAppConfig.ready()](https://github.com/shuup/shuup/blob/5584ebf912bae415fe367ea0c00ad4c5cff49244/shuup/core/__init__.py#L11) that throws an exception if some of the Parler settings (`PARLER_DEFAULT_LANGUAGE_CODE` and `PARLER_LANGUAGES`) are unset -- or perhaps it could automatically derive them based on the Django `LANGUAGES` setting, as \"sane defaults\" go?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2016, Shoop Ltd. 
All rights reserved.\n#\n# This source code is licensed under the AGPLv3 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom shuup.apps import AppConfig\n\n\nclass ShuupCoreAppConfig(AppConfig):\n name = \"shuup.core\"\n verbose_name = \"Shuup Core\"\n label = \"shuup\" # Use \"shuup\" as app_label instead of \"core\"\n required_installed_apps = (\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"easy_thumbnails\",\n \"filer\",\n )\n provides = {\n \"api_populator\": [\n \"shuup.core.api:populate_core_api\"\n ],\n \"pricing_module\": [\n \"shuup.core.pricing.default_pricing:DefaultPricingModule\"\n ],\n }\n\n\ndefault_app_config = \"shuup.core.ShuupCoreAppConfig\"\n", "path": "shuup/core/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.\n#\n# This source code is licensed under the AGPLv3 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom shuup.utils.excs import Problem\n\n\nclass ImmutabilityError(ValueError):\n pass\n\n\nclass NoProductsToShipException(Exception):\n pass\n\n\nclass NoPaymentToCreateException(Exception):\n pass\n\n\nclass NoRefundToCreateException(Exception):\n pass\n\n\nclass RefundExceedsAmountException(Exception):\n pass\n\n\nclass InvalidRefundAmountException(Exception):\n pass\n\n\nclass ProductNotOrderableProblem(Problem):\n pass\n\n\nclass ProductNotVisibleProblem(Problem):\n pass\n\n\nclass ImpossibleProductModeException(ValueError):\n def __init__(self, message, code=None):\n super(ImpossibleProductModeException, self).__init__(message)\n self.code = code\n", "path": "shuup/core/excs.py"}]} | 1,602 | 312 |
gh_patches_debug_29881 | rasdani/github-patches | git_diff | e2nIEE__pandapower-880
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing dependencies: xlsxwriter, xlrd, cryptography
Hi,
I am currently following the instructions for the installation of the development version, as shown here: https://www.pandapower.org/start/#develop
I have a brand new virtual environment on Python 3.8.3 (Windows 10, 64 bits), and the tests failed because of the following missing dependencies:
> Edit: Same result on Python 3.7.8.
1. xlsxwriter: `FAILED pandapower\test\api\test_file_io.py::test_excel[1] - ModuleNotFoundError: No module named 'xlsxwriter'`
2. xlrd: `FAILED pandapower\test\api\test_file_io.py::test_excel[1] - ImportError: Missing optional dependency 'xlrd'. Install xlrd >= 1.0.0 for Excel support Use pip or conda to install xlrd.`
3. cryptography: `FAILED pandapower\test\api\test_file_io.py::test_encrypted_json[1] - ModuleNotFoundError: No module named 'cryptography'`
The permanent solution would most likely be to add those to setup.py and mention them in the documentation, but you might want to check if you should restrict the version.
P.S.: The tests still ended up failing, but that's a separate issue (see issue #876).
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6 from setuptools import setup, find_packages
7 import re
8
9 with open('README.rst', 'rb') as f:
10 install = f.read().decode('utf-8')
11
12 with open('CHANGELOG.rst', 'rb') as f:
13 changelog = f.read().decode('utf-8')
14
15 classifiers = [
16 'Development Status :: 5 - Production/Stable',
17 'Environment :: Console',
18 'Intended Audience :: Developers',
19 'Intended Audience :: Education',
20 'Intended Audience :: Science/Research',
21 'License :: OSI Approved :: BSD License',
22 'Natural Language :: English',
23 'Operating System :: OS Independent',
24 'Programming Language :: Python',
25 'Programming Language :: Python :: 3']
26
27 with open('.travis.yml', 'rb') as f:
28 lines = f.read().decode('utf-8')
29 for version in re.findall('python: 3.[0-9]', lines):
30 classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])
31
32 long_description = '\n\n'.join((install, changelog))
33
34 setup(
35 name='pandapower',
36 version='2.3.0',
37 author='Leon Thurner, Alexander Scheidler',
38 author_email='[email protected], [email protected]',
39 description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',
40 long_description=long_description,
41 long_description_content_type='text/x-rst',
42 url='http://www.pandapower.org',
43 license='BSD',
44 install_requires=["pandas>=0.17",
45 "networkx",
46 "scipy",
47 "numpy>=0.11",
48 "packaging"],
49 extras_require={":python_version<'3.0'": ["future"],
50 "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
51 "plotting": ["plotly", "matplotlib", "python-igraph"],
52 "test": ["pytest", "pytest-xdist"]},
53 packages=find_packages(),
54 include_package_data=True,
55 classifiers=classifiers
56 )
57
[end of setup.py]
[start of pandapower/__init__.py]
1 __version__ = "2.3.0"
2
3 import os
4 pp_dir = os.path.dirname(os.path.realpath(__file__))
5
6 from pandapower.auxiliary import *
7 from pandapower.convert_format import *
8 from pandapower.create import *
9 from pandapower.diagnostic import *
10 from pandapower.file_io import *
11 from pandapower.run import *
12 from pandapower.runpm import *
13 from pandapower.std_types import *
14 from pandapower.toolbox import *
15 from pandapower.powerflow import *
16 from pandapower.opf import *
17 from pandapower.optimal_powerflow import OPFNotConverged
18 from pandapower.pf.runpp_3ph import runpp_3ph
19 import pandas as pd
20 pd.options.mode.chained_assignment = None # default='warn'
21
[end of pandapower/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/pandapower/__init__.py b/pandapower/__init__.py
--- a/pandapower/__init__.py
+++ b/pandapower/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.3.0"
+__version__ = "2.3.1"
import os
pp_dir = os.path.dirname(os.path.realpath(__file__))
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,10 +33,10 @@
setup(
name='pandapower',
- version='2.3.0',
+ version='2.3.1',
author='Leon Thurner, Alexander Scheidler',
author_email='[email protected], [email protected]',
- description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',
+ description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',
long_description=long_description,
long_description_content_type='text/x-rst',
url='http://www.pandapower.org',
@@ -45,11 +45,14 @@
"networkx",
"scipy",
"numpy>=0.11",
- "packaging"],
- extras_require={":python_version<'3.0'": ["future"],
- "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
- "plotting": ["plotly", "matplotlib", "python-igraph"],
- "test": ["pytest", "pytest-xdist"]},
+ "packaging",
+ "xlsxwriter",
+ "xlrd",
+ "cryptography"],
+ extras_require={
+ "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
+ "plotting": ["plotly", "matplotlib", "python-igraph"],
+ "test": ["pytest", "pytest-xdist"]},
packages=find_packages(),
include_package_data=True,
classifiers=classifiers
| {"golden_diff": "diff --git a/pandapower/__init__.py b/pandapower/__init__.py\n--- a/pandapower/__init__.py\n+++ b/pandapower/__init__.py\n@@ -1,4 +1,4 @@\n-__version__ = \"2.3.0\"\n+__version__ = \"2.3.1\"\n \n import os\n pp_dir = os.path.dirname(os.path.realpath(__file__))\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,10 +33,10 @@\n \n setup(\n name='pandapower',\n- version='2.3.0',\n+ version='2.3.1',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n- description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',\n+ description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',\n long_description=long_description,\n \tlong_description_content_type='text/x-rst',\n url='http://www.pandapower.org',\n@@ -45,11 +45,14 @@\n \"networkx\",\n \"scipy\",\n \"numpy>=0.11\",\n- \"packaging\"],\n- extras_require={\":python_version<'3.0'\": [\"future\"],\n- \"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n- \"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\"],\n- \"test\": [\"pytest\", \"pytest-xdist\"]},\n+ \"packaging\",\n+\t\t\t\t\t \"xlsxwriter\",\n+\t\t\t\t\t \"xlrd\",\n+\t\t\t\t\t \"cryptography\"],\n+ extras_require={\n+\t\t\"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n+\t\t\"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\"],\n+\t\t\"test\": [\"pytest\", \"pytest-xdist\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=classifiers\n", "issue": "Missing dependencies: xlswriter, xlrd, cryptography\nHi,\r\n\r\nI am currently following the instructions for the installation of the development version, as shown here: https://www.pandapower.org/start/#develop\r\n\r\nI have a brand new virtual environment on Python 3.8.3 (Windows 10, 64 bits), and the tests failed because of the following missing dependencies:\r\n\r\n> Edit: Same result on Python 3.7.8.\r\n\r\n1. xlsxwriter: `FAILED pandapower\\test\\api\\test_file_io.py::test_excel[1] - ModuleNotFoundError: No module named 'xlsxwriter'`\r\n2. xlrd: `FAILED pandapower\\test\\api\\test_file_io.py::test_excel[1] - ImportError: Missing optional dependency 'xlrd'. Install xlrd >= 1.0.0 for Excel support Use pip or conda to install xlrd.`\r\n3. cryptography: `FAILED pandapower\\test\\api\\test_file_io.py::test_encrypted_json[1] - ModuleNotFoundError: No module named 'cryptography'`\r\n\r\nThe permanent solution would most likely be to add those to setup.py and mention them in the documentation, but you might want to check if you should restrict the version.\r\n\r\nP.S.: The tests still ended up failing, but that's a seperate issue (see issue #876 ).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. 
All rights reserved.\n\nfrom setuptools import setup, find_packages\nimport re\n\nwith open('README.rst', 'rb') as f:\n install = f.read().decode('utf-8')\n\nwith open('CHANGELOG.rst', 'rb') as f:\n changelog = f.read().decode('utf-8')\n\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3']\n\nwith open('.travis.yml', 'rb') as f:\n lines = f.read().decode('utf-8')\n for version in re.findall('python: 3.[0-9]', lines):\n classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])\n\nlong_description = '\\n\\n'.join((install, changelog))\n\nsetup(\n name='pandapower',\n version='2.3.0',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',\n long_description=long_description,\n\tlong_description_content_type='text/x-rst',\n url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pandas>=0.17\",\n \"networkx\",\n \"scipy\",\n \"numpy>=0.11\",\n \"packaging\"],\n extras_require={\":python_version<'3.0'\": [\"future\"],\n \"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\"],\n \"test\": [\"pytest\", \"pytest-xdist\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=classifiers\n)\n", "path": "setup.py"}, {"content": "__version__ = \"2.3.0\"\n\nimport os\npp_dir = os.path.dirname(os.path.realpath(__file__))\n\nfrom pandapower.auxiliary import *\nfrom pandapower.convert_format import *\nfrom pandapower.create import *\nfrom pandapower.diagnostic import *\nfrom pandapower.file_io import *\nfrom pandapower.run import *\nfrom pandapower.runpm import *\nfrom pandapower.std_types import *\nfrom pandapower.toolbox import *\nfrom pandapower.powerflow import *\nfrom pandapower.opf import *\nfrom pandapower.optimal_powerflow import OPFNotConverged\nfrom pandapower.pf.runpp_3ph import runpp_3ph\nimport pandas as pd\npd.options.mode.chained_assignment = None # default='warn'\n", "path": "pandapower/__init__.py"}]} | 1,671 | 490 |
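A small sketch of how the missing modules can be detected up front rather than via scattered test failures. The package names come from the issue; this check is illustrative and not part of pandapower, which instead moves the packages into `install_requires` as the golden diff shows.

```
import importlib.util

REQUIRED = ["xlsxwriter", "xlrd", "cryptography"]

# find_spec returns None when a top-level package is not installed.
missing = [name for name in REQUIRED if importlib.util.find_spec(name) is None]
if missing:
    # With the fix applied, a fresh virtualenv never reaches this branch.
    raise ImportError("Missing required packages: " + ", ".join(missing))
```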
gh_patches_debug_19563 | rasdani/github-patches | git_diff | Flexget__Flexget-1345
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError with Form Login Plugin
### Expected behaviour:
Task runs without generating error.
### Actual behaviour:
Task runs and generates the following error
```
TypeError: must be unicode, not str
```
### Steps to reproduce:
- Step 1: Install latest version of Flexget using virtualenv
- Step 2: pip install mechanize
- Step 3: Create config.yml
- Step 4: flexget --test execute
#### Config:
```
tasks:
test task:
form:
url: http://example.com/login.php
username: email address
password: password
html:
url: http://example.com/
```
#### Log:
Crash:
```
2016-08-16 11:40 DEBUG manager test task Traceback:
Traceback (most recent call last):
File "/home/username/flexget/local/lib/python2.7/site-packages/flexget/task.py", line 444, in __run_plugin
return method(*args, **kwargs)
File "/home/username/flexget/local/lib/python2.7/site-packages/flexget/event.py", line 23, in __call__
return self.func(*args, **kwargs)
File "/home/username/flexget/local/lib/python2.7/site-packages/flexget/plugins/plugin_formlogin.py", line 73, in on_task_start
f.write(br.response().get_data())
TypeError: must be unicode, not str
2016-08-16 11:40 WARNING task test task Aborting task (plugin: form)
2016-08-16 11:40 DEBUG task_queue task test task aborted: TaskAbort(reason=BUG: Unhandled error in plugin form: must be unicode, not str, silent=False)
```
Full log.
```
http://pastebin.com/yBRqhYjR
```
### Additional information:
- Flexget Version: 2.2.20
- Python Version: 2.7.9
- Installation method: Virtualenv
- OS and version: Debian 8
</issue>
<code>
[start of flexget/plugins/plugin_formlogin.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # pylint: disable=unused-import, redefined-builtin
3
4 import logging
5 import os
6 import socket
7
8 from flexget import plugin
9 from flexget.event import event
10
11 log = logging.getLogger('formlogin')
12
13
14 class FormLogin(object):
15 """
16 Login on form
17 """
18
19 schema = {
20 'type': 'object',
21 'properties': {
22 'url': {'type': 'string', 'format': 'url'},
23 'username': {'type': 'string'},
24 'password': {'type': 'string'},
25 'userfield': {'type': 'string'},
26 'passfield': {'type': 'string'}
27 },
28 'required': ['url', 'username', 'password'],
29 'additionalProperties': False
30 }
31
32 def on_task_start(self, task, config):
33 try:
34 from mechanize import Browser
35 except ImportError:
36 raise plugin.PluginError('mechanize required (python module), please install it.', log)
37
38 userfield = config.get('userfield', 'username')
39 passfield = config.get('passfield', 'password')
40
41 url = config['url']
42 username = config['username']
43 password = config['password']
44
45 br = Browser()
46 br.set_handle_robots(False)
47 try:
48 br.open(url)
49 except Exception:
50 # TODO: improve error handling
51 raise plugin.PluginError('Unable to post login form', log)
52
53 # br.set_debug_redirects(True)
54 # br.set_debug_responses(True)
55 # br.set_debug_http(True)
56
57 try:
58 for form in br.forms():
59 loginform = form
60
61 try:
62 loginform[userfield] = username
63 loginform[passfield] = password
64 break
65 except Exception:
66 pass
67 else:
68 received = os.path.join(task.manager.config_base, 'received')
69 if not os.path.isdir(received):
70 os.mkdir(received)
71 filename = os.path.join(received, '%s.formlogin.html' % task.name)
72 with open(filename, 'w') as f:
73 f.write(br.response().get_data())
74 log.critical('I have saved the login page content to %s for you to view' % filename)
75 raise plugin.PluginError('Unable to find login fields', log)
76 except socket.timeout:
77 raise plugin.PluginError('Timed out on url %s' % url)
78
79 br.form = loginform
80
81 br.submit()
82
83 cookiejar = br._ua_handlers["_cookies"].cookiejar
84
85 # Add cookiejar to our requests session
86 task.requests.add_cookiejar(cookiejar)
87
88
89 @event('plugin.register')
90 def register_plugin():
91 plugin.register(FormLogin, 'form', api_ver=2)
92
[end of flexget/plugins/plugin_formlogin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/flexget/plugins/plugin_formlogin.py b/flexget/plugins/plugin_formlogin.py
--- a/flexget/plugins/plugin_formlogin.py
+++ b/flexget/plugins/plugin_formlogin.py
@@ -2,6 +2,7 @@
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
+import io
import os
import socket
@@ -69,7 +70,7 @@
if not os.path.isdir(received):
os.mkdir(received)
filename = os.path.join(received, '%s.formlogin.html' % task.name)
- with open(filename, 'w') as f:
+ with io.open(filename, 'wb') as f:
f.write(br.response().get_data())
log.critical('I have saved the login page content to %s for you to view' % filename)
raise plugin.PluginError('Unable to find login fields', log)
| {"golden_diff": "diff --git a/flexget/plugins/plugin_formlogin.py b/flexget/plugins/plugin_formlogin.py\n--- a/flexget/plugins/plugin_formlogin.py\n+++ b/flexget/plugins/plugin_formlogin.py\n@@ -2,6 +2,7 @@\n from builtins import * # pylint: disable=unused-import, redefined-builtin\n \n import logging\n+import io\n import os\n import socket\n \n@@ -69,7 +70,7 @@\n if not os.path.isdir(received):\n os.mkdir(received)\n filename = os.path.join(received, '%s.formlogin.html' % task.name)\n- with open(filename, 'w') as f:\n+ with io.open(filename, 'wb') as f:\n f.write(br.response().get_data())\n log.critical('I have saved the login page content to %s for you to view' % filename)\n raise plugin.PluginError('Unable to find login fields', log)\n", "issue": "TypeError with Form Login Plugin\n### Expected behaviour:\n\nTask runs without generating error.\n### Actual behaviour:\n\nTask runs and generates the following error\n\n```\nTypeError: must be unicode, not str\n```\n### Steps to reproduce:\n- Step 1: Install latest version of Flexget using virtualenv\n- Step 2: pip install mechanize\n- Step 3: Create config.yml\n- Step 4: flexget --test execute\n#### Config:\n\n```\ntasks:\n test task:\n form:\n url: http://example.com/login.php\n username: email address\n password: password\n html:\n url: http://example.com/\n```\n#### Log:\n\nCrash:\n\n```\n2016-08-16 11:40 DEBUG manager test task Traceback:\nTraceback (most recent call last):\n File \"/home/username/flexget/local/lib/python2.7/site-packages/flexget/task.py\", line 444, in __run_plugin\n return method(*args, **kwargs)\n File \"/home/username/flexget/local/lib/python2.7/site-packages/flexget/event.py\", line 23, in __call__\n return self.func(*args, **kwargs)\n File \"/home/username/flexget/local/lib/python2.7/site-packages/flexget/plugins/plugin_formlogin.py\", line 73, in on_task_start\n f.write(br.response().get_data())\nTypeError: must be unicode, not str\n2016-08-16 11:40 WARNING task test task Aborting task (plugin: form)\n2016-08-16 11:40 DEBUG task_queue task test task aborted: TaskAbort(reason=BUG: Unhandled error in plugin form: must be unicode, not str, silent=False)\n```\n\nFull log.\n\n```\nhttp://pastebin.com/yBRqhYjR\n```\n### Additional information:\n- Flexget Version: 2.2.20\n- Python Version: 2.7.9\n- Installation method: Virtualenv\n- OS and version: Debian 8\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport os\nimport socket\n\nfrom flexget import plugin\nfrom flexget.event import event\n\nlog = logging.getLogger('formlogin')\n\n\nclass FormLogin(object):\n \"\"\"\n Login on form\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {'type': 'string', 'format': 'url'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'userfield': {'type': 'string'},\n 'passfield': {'type': 'string'}\n },\n 'required': ['url', 'username', 'password'],\n 'additionalProperties': False\n }\n\n def on_task_start(self, task, config):\n try:\n from mechanize import Browser\n except ImportError:\n raise plugin.PluginError('mechanize required (python module), please install it.', log)\n\n userfield = config.get('userfield', 'username')\n passfield = config.get('passfield', 'password')\n\n url = config['url']\n username = config['username']\n password = config['password']\n\n br = Browser()\n br.set_handle_robots(False)\n try:\n br.open(url)\n except 
Exception:\n # TODO: improve error handling\n raise plugin.PluginError('Unable to post login form', log)\n\n # br.set_debug_redirects(True)\n # br.set_debug_responses(True)\n # br.set_debug_http(True)\n\n try:\n for form in br.forms():\n loginform = form\n\n try:\n loginform[userfield] = username\n loginform[passfield] = password\n break\n except Exception:\n pass\n else:\n received = os.path.join(task.manager.config_base, 'received')\n if not os.path.isdir(received):\n os.mkdir(received)\n filename = os.path.join(received, '%s.formlogin.html' % task.name)\n with open(filename, 'w') as f:\n f.write(br.response().get_data())\n log.critical('I have saved the login page content to %s for you to view' % filename)\n raise plugin.PluginError('Unable to find login fields', log)\n except socket.timeout:\n raise plugin.PluginError('Timed out on url %s' % url)\n\n br.form = loginform\n\n br.submit()\n\n cookiejar = br._ua_handlers[\"_cookies\"].cookiejar\n\n # Add cookiejar to our requests session\n task.requests.add_cookiejar(cookiejar)\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FormLogin, 'form', api_ver=2)\n", "path": "flexget/plugins/plugin_formlogin.py"}]} | 1,783 | 204 |
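A minimal sketch of the byte-safety fix in the diff above: mechanize's `get_data()` returns bytes, so the file must be opened in binary mode. `save_login_page` is an illustrative name, not FlexGet's API.

```
import io


def save_login_page(filename, data):
    # data is bytes (e.g. br.response().get_data()); mode "wb" avoids the
    # Python 2 "TypeError: must be unicode, not str" that mode "w" raises
    # under unicode_literals.
    with io.open(filename, "wb") as f:
        f.write(data)


save_login_page("received.formlogin.html", b"<html>login form</html>")
```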
gh_patches_debug_3564 | rasdani/github-patches | git_diff | pypa__setuptools-2369
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SystemError: Parent module 'setuptools' not loaded, cannot perform relative import with setuptools 50
After upgrading setuptools to 50.0 today, the environment fails to locate the entry points as it could not import distutils
```
$ python --version
Python 3.5.1
$ python -c "import distutils"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "<frozen importlib._bootstrap>", line 969, in _find_and_load
File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 666, in _load_unlocked
File "<frozen importlib._bootstrap>", line 577, in module_from_spec
File "/home/gchan/tmp/setuptools-python-3.5/lib/python3.5/site-packages/_distutils_hack/__init__.py", line 82, in create_module
return importlib.import_module('._distutils', 'setuptools')
File "/home/gchan/tmp/setuptools-python-3.5/lib64/python3.5/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 981, in _gcd_import
File "<frozen importlib._bootstrap>", line 931, in _sanity_check
SystemError: Parent module 'setuptools' not loaded, cannot perform relative import
```
The issue could not be found in the python 3.8 environment.
</issue>
<code>
[start of _distutils_hack/__init__.py]
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6
7
8 is_pypy = '__pypy__' in sys.builtin_module_names
9
10
11 def warn_distutils_present():
12 if 'distutils' not in sys.modules:
13 return
14 if is_pypy and sys.version_info < (3, 7):
15 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
16 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
17 return
18 warnings.warn(
19 "Distutils was imported before Setuptools, but importing Setuptools "
20 "also replaces the `distutils` module in `sys.modules`. This may lead "
21 "to undesirable behaviors or errors. To avoid these issues, avoid "
22 "using distutils directly, ensure that setuptools is installed in the "
23 "traditional way (e.g. not an editable install), and/or make sure that "
24 "setuptools is always imported before distutils.")
25
26
27 def clear_distutils():
28 if 'distutils' not in sys.modules:
29 return
30 warnings.warn("Setuptools is replacing distutils.")
31 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
32 for name in mods:
33 del sys.modules[name]
34
35
36 def enabled():
37 """
38 Allow selection of distutils by environment variable.
39 """
40 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
41 return which == 'local'
42
43
44 def ensure_local_distutils():
45 clear_distutils()
46 distutils = importlib.import_module('setuptools._distutils')
47 distutils.__name__ = 'distutils'
48 sys.modules['distutils'] = distutils
49
50 # sanity check that submodules load as expected
51 core = importlib.import_module('distutils.core')
52 assert '_distutils' in core.__file__, core.__file__
53
54
55 def do_override():
56 """
57 Ensure that the local copy of distutils is preferred over stdlib.
58
59 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
60 for more motivation.
61 """
62 if enabled():
63 warn_distutils_present()
64 ensure_local_distutils()
65
66
67 class DistutilsMetaFinder:
68 def find_spec(self, fullname, path, target=None):
69 if path is not None:
70 return
71
72 method_name = 'spec_for_{fullname}'.format(**locals())
73 method = getattr(self, method_name, lambda: None)
74 return method()
75
76 def spec_for_distutils(self):
77 import importlib.abc
78 import importlib.util
79
80 class DistutilsLoader(importlib.abc.Loader):
81
82 def create_module(self, spec):
83 return importlib.import_module('._distutils', 'setuptools')
84
85 def exec_module(self, module):
86 pass
87
88 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
89
90 def spec_for_pip(self):
91 """
92 Ensure stdlib distutils when running under pip.
93 See pypa/pip#8761 for rationale.
94 """
95 clear_distutils()
96 self.spec_for_distutils = lambda: None
97
98
99 DISTUTILS_FINDER = DistutilsMetaFinder()
100
101
102 def add_shim():
103 sys.meta_path.insert(0, DISTUTILS_FINDER)
104
105
106 def remove_shim():
107 try:
108 sys.meta_path.remove(DISTUTILS_FINDER)
109 except ValueError:
110 pass
111
[end of _distutils_hack/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -80,7 +80,7 @@
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
- return importlib.import_module('._distutils', 'setuptools')
+ return importlib.import_module('setuptools._distutils')
def exec_module(self, module):
pass
| {"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -80,7 +80,7 @@\n class DistutilsLoader(importlib.abc.Loader):\n \n def create_module(self, spec):\n- return importlib.import_module('._distutils', 'setuptools')\n+ return importlib.import_module('setuptools._distutils')\n \n def exec_module(self, module):\n pass\n", "issue": "SystemError: Parent module 'setuptools' not loaded, cannot perform relative import with setuptools 50\nAfter upgrading setuptools to 50.0 today, the environment fails to locate the entry points as it could not import distutils\r\n\r\n```\r\n$ python --version\r\nPython 3.5.1\r\n$ python -c \"import distutils\"\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"<frozen importlib._bootstrap>\", line 969, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 958, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 666, in _load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 577, in module_from_spec\r\n File \"/home/gchan/tmp/setuptools-python-3.5/lib/python3.5/site-packages/_distutils_hack/__init__.py\", line 82, in create_module\r\n return importlib.import_module('._distutils', 'setuptools')\r\n File \"/home/gchan/tmp/setuptools-python-3.5/lib64/python3.5/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 981, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 931, in _sanity_check\r\nSystemError: Parent module 'setuptools' not loaded, cannot perform relative import\r\n```\r\n\r\nThe issue could not be found in the python 3.8 environment. \n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure that \"\n \"setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('._distutils', 'setuptools')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]} | 1,942 | 123 |
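A short illustration of why the one-line change works, using only the standard library; the comments describe the Python 3.5 behavior reported in the issue.

```
import importlib

# Relative form: importlib.import_module("._distutils", "setuptools")
# resolves against the parent in sys.modules, so it fails with
# "Parent module 'setuptools' not loaded" while the meta-path hook is
# still bootstrapping on Python 3.5.
#
# Absolute form: imports the parent package first, so it works at any
# point during startup. The same pattern with a stdlib package:
mod = importlib.import_module("logging.handlers")
print(mod.__name__)  # -> logging.handlers
```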
gh_patches_debug_31088 | rasdani/github-patches | git_diff | shapiromatron__hawc-505
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dosing regime dose groups hotfix
We had a reported data corruption issue where a user edited content in a dosing regime and then after saving, we found multiple endpoint-groups with the same endpoint group id, which shouldn't be possible.
After investigation, we found it was an error in the signal which keeps dose-groups and endpoint-groups synced. If there were multiple representations of dose-groups, for example 5 dose-groups and 2 units, then hawc would create 10 endpoint-groups instead of 5. Further, it would create these even for endpoints where data is not extracted.
Here we fix this issue and write a few tests.
</issue>
<code>
[start of hawc/apps/animal/admin.py]
1 from django.contrib import admin
2
3 from . import models
4
5
6 @admin.register(models.Experiment)
7 class ExperimentAdmin(admin.ModelAdmin):
8 list_display = (
9 "id",
10 "study",
11 "name",
12 "type",
13 "has_multiple_generations",
14 "chemical",
15 "cas",
16 "created",
17 )
18 list_filter = ("type", "has_multiple_generations", "chemical", "study__assessment")
19 search_fields = (
20 "study__short_citation",
21 "name",
22 )
23
24
25 @admin.register(models.AnimalGroup)
26 class AnimalGroupAdmin(admin.ModelAdmin):
27 list_display = (
28 "id",
29 "experiment",
30 "name",
31 "species",
32 "strain",
33 "sex",
34 "created",
35 )
36 list_filter = ("species", "strain", "sex", "experiment__study__assessment_id")
37 search_fields = ("name",)
38
39
40 @admin.register(models.DosingRegime)
41 class DosingRegimeAdmin(admin.ModelAdmin):
42 list_display = (
43 "id",
44 "dosed_animals",
45 "route_of_exposure",
46 "duration_exposure",
47 "num_dose_groups",
48 "created",
49 )
50 list_filter = (
51 "route_of_exposure",
52 "num_dose_groups",
53 "dosed_animals__experiment__study__assessment_id",
54 )
55 list_select_related = ("dosed_animals",)
56 search_fields = ("dosed_animals__name",)
57
58
59 @admin.register(models.Endpoint)
60 class EndpointAdmin(admin.ModelAdmin):
61 list_display = (
62 "id",
63 "assessment_id",
64 "animal_group",
65 "name",
66 "system",
67 "organ",
68 "effect",
69 "data_extracted",
70 "created",
71 )
72 list_filter = ("system", "organ", "data_extracted", "assessment_id")
73 search_fields = ("name",)
74
[end of hawc/apps/animal/admin.py]
[start of hawc/services/utils/git.py]
1 import subprocess
2 from datetime import datetime
3
4 from pydantic import BaseModel
5
6
7 class Commit(BaseModel):
8 sha: str
9 dt: datetime
10
11 @classmethod
12 def current(cls, cwd: str = ".") -> "Commit":
13 """Return information on the last commit at the repository path desired.
14
15 Returns:
16 A Commit instance
17 """
18 cmd = "git log -1 --format=%H"
19 sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:12]
20 cmd = "git show -s --format=%ct"
21 dt = datetime.fromtimestamp(
22 int(subprocess.check_output(cmd.split(), cwd=cwd).decode().strip())
23 )
24 return cls(sha=sha, dt=dt)
25
[end of hawc/services/utils/git.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/hawc/apps/animal/admin.py b/hawc/apps/animal/admin.py
--- a/hawc/apps/animal/admin.py
+++ b/hawc/apps/animal/admin.py
@@ -35,6 +35,13 @@
)
list_filter = ("species", "strain", "sex", "experiment__study__assessment_id")
search_fields = ("name",)
+ raw_id_fields = ("experiment", "species", "strain", "dosing_regime", "siblings")
+
+
+class DoseGroupInline(admin.TabularInline):
+ model = models.DoseGroup
+ raw_id_fields = ("dose_units",)
+ extra = 0
@admin.register(models.DosingRegime)
@@ -54,6 +61,12 @@
)
list_select_related = ("dosed_animals",)
search_fields = ("dosed_animals__name",)
+ inlines = (DoseGroupInline,)
+
+
+class EndpointGroupInline(admin.TabularInline):
+ model = models.EndpointGroup
+ extra = 0
@admin.register(models.Endpoint)
@@ -71,3 +84,13 @@
)
list_filter = ("system", "organ", "data_extracted", "assessment_id")
search_fields = ("name",)
+ raw_id_fields = (
+ "assessment",
+ "animal_group",
+ "system_term",
+ "organ_term",
+ "effect_term",
+ "effect_subtype_term",
+ "name_term",
+ )
+ inlines = (EndpointGroupInline,)
diff --git a/hawc/services/utils/git.py b/hawc/services/utils/git.py
--- a/hawc/services/utils/git.py
+++ b/hawc/services/utils/git.py
@@ -16,7 +16,7 @@
A Commit instance
"""
cmd = "git log -1 --format=%H"
- sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:12]
+ sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:8]
cmd = "git show -s --format=%ct"
dt = datetime.fromtimestamp(
int(subprocess.check_output(cmd.split(), cwd=cwd).decode().strip())
| {"golden_diff": "diff --git a/hawc/apps/animal/admin.py b/hawc/apps/animal/admin.py\n--- a/hawc/apps/animal/admin.py\n+++ b/hawc/apps/animal/admin.py\n@@ -35,6 +35,13 @@\n )\n list_filter = (\"species\", \"strain\", \"sex\", \"experiment__study__assessment_id\")\n search_fields = (\"name\",)\n+ raw_id_fields = (\"experiment\", \"species\", \"strain\", \"dosing_regime\", \"siblings\")\n+\n+\n+class DoseGroupInline(admin.TabularInline):\n+ model = models.DoseGroup\n+ raw_id_fields = (\"dose_units\",)\n+ extra = 0\n \n \n @admin.register(models.DosingRegime)\n@@ -54,6 +61,12 @@\n )\n list_select_related = (\"dosed_animals\",)\n search_fields = (\"dosed_animals__name\",)\n+ inlines = (DoseGroupInline,)\n+\n+\n+class EndpointGroupInline(admin.TabularInline):\n+ model = models.EndpointGroup\n+ extra = 0\n \n \n @admin.register(models.Endpoint)\n@@ -71,3 +84,13 @@\n )\n list_filter = (\"system\", \"organ\", \"data_extracted\", \"assessment_id\")\n search_fields = (\"name\",)\n+ raw_id_fields = (\n+ \"assessment\",\n+ \"animal_group\",\n+ \"system_term\",\n+ \"organ_term\",\n+ \"effect_term\",\n+ \"effect_subtype_term\",\n+ \"name_term\",\n+ )\n+ inlines = (EndpointGroupInline,)\ndiff --git a/hawc/services/utils/git.py b/hawc/services/utils/git.py\n--- a/hawc/services/utils/git.py\n+++ b/hawc/services/utils/git.py\n@@ -16,7 +16,7 @@\n A Commit instance\n \"\"\"\n cmd = \"git log -1 --format=%H\"\n- sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:12]\n+ sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:8]\n cmd = \"git show -s --format=%ct\"\n dt = datetime.fromtimestamp(\n int(subprocess.check_output(cmd.split(), cwd=cwd).decode().strip())\n", "issue": "dosing regime dose groups hotfix\nWe had a reported data corruption issue where a user edited content in a dosing regime and then after saving, we found multiple endpoint-groups with the same endpoint group id, which shouldn't be possible.\r\n\r\nAfter investigation, we found it was an error in the signal which keeps dose-groups and endpoint-groups synced. If there were multiple representation of dose-groups, for example 5 dose-groups and 2 units, then hawc would create 10 endpoint-groups instead of 5. Further, it would create these even for endpoints where data is not extracted.\r\n\r\nHere we fix this issue and write a few tests.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom . 
import models\n\n\[email protected](models.Experiment)\nclass ExperimentAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"study\",\n \"name\",\n \"type\",\n \"has_multiple_generations\",\n \"chemical\",\n \"cas\",\n \"created\",\n )\n list_filter = (\"type\", \"has_multiple_generations\", \"chemical\", \"study__assessment\")\n search_fields = (\n \"study__short_citation\",\n \"name\",\n )\n\n\[email protected](models.AnimalGroup)\nclass AnimalGroupAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"experiment\",\n \"name\",\n \"species\",\n \"strain\",\n \"sex\",\n \"created\",\n )\n list_filter = (\"species\", \"strain\", \"sex\", \"experiment__study__assessment_id\")\n search_fields = (\"name\",)\n\n\[email protected](models.DosingRegime)\nclass DosingRegimeAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"dosed_animals\",\n \"route_of_exposure\",\n \"duration_exposure\",\n \"num_dose_groups\",\n \"created\",\n )\n list_filter = (\n \"route_of_exposure\",\n \"num_dose_groups\",\n \"dosed_animals__experiment__study__assessment_id\",\n )\n list_select_related = (\"dosed_animals\",)\n search_fields = (\"dosed_animals__name\",)\n\n\[email protected](models.Endpoint)\nclass EndpointAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"assessment_id\",\n \"animal_group\",\n \"name\",\n \"system\",\n \"organ\",\n \"effect\",\n \"data_extracted\",\n \"created\",\n )\n list_filter = (\"system\", \"organ\", \"data_extracted\", \"assessment_id\")\n search_fields = (\"name\",)\n", "path": "hawc/apps/animal/admin.py"}, {"content": "import subprocess\nfrom datetime import datetime\n\nfrom pydantic import BaseModel\n\n\nclass Commit(BaseModel):\n sha: str\n dt: datetime\n\n @classmethod\n def current(cls, cwd: str = \".\") -> \"Commit\":\n \"\"\"Return information on the last commit at the repository path desired.\n\n Returns:\n A Commit instance\n \"\"\"\n cmd = \"git log -1 --format=%H\"\n sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:12]\n cmd = \"git show -s --format=%ct\"\n dt = datetime.fromtimestamp(\n int(subprocess.check_output(cmd.split(), cwd=cwd).decode().strip())\n )\n return cls(sha=sha, dt=dt)\n", "path": "hawc/services/utils/git.py"}]} | 1,437 | 505 |
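The PR description above explains the corruption (N dose-groups times M units produced N*M endpoint-groups, even without extracted data), but the admin/git diff does not show the signal itself. The sketch below illustrates the guard described in the text using hypothetical dict-based records, not hawc's actual models.

```
def sync_endpoint_groups(endpoint, dose_groups):
    # Skip endpoints where no data was extracted, and deduplicate by
    # dose_group_id so 5 dose-groups seen under 2 units yield 5
    # endpoint-groups, not 10.
    if not endpoint["data_extracted"]:
        return []
    seen = set()
    groups = []
    for dg in dose_groups:
        if dg["dose_group_id"] in seen:
            continue
        seen.add(dg["dose_group_id"])
        groups.append({"endpoint_id": endpoint["id"],
                       "dose_group_id": dg["dose_group_id"]})
    return groups


doses = [{"dose_group_id": i % 5} for i in range(10)]  # 5 groups x 2 units
print(len(sync_endpoint_groups({"id": 1, "data_extracted": True}, doses)))  # 5
```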
gh_patches_debug_3278 | rasdani/github-patches | git_diff | certbot__certbot-7294
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Certbot's Apache plugin doesn't work on Scientific Linux
See https://community.letsencrypt.org/t/noinstallationerror-cannot-find-apache-executable-apache2ctl/97980.
This should be fixable by adding an override in https://github.com/certbot/certbot/blob/master/certbot-apache/certbot_apache/entrypoint.py#L17.
</issue>
<code>
[start of certbot-apache/certbot_apache/entrypoint.py]
1 """ Entry point for Apache Plugin """
2 # Pylint does not like disutils.version when running inside a venv.
3 # See: https://github.com/PyCQA/pylint/issues/73
4 from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
5
6 from certbot import util
7
8 from certbot_apache import configurator
9 from certbot_apache import override_arch
10 from certbot_apache import override_fedora
11 from certbot_apache import override_darwin
12 from certbot_apache import override_debian
13 from certbot_apache import override_centos
14 from certbot_apache import override_gentoo
15 from certbot_apache import override_suse
16
17 OVERRIDE_CLASSES = {
18 "arch": override_arch.ArchConfigurator,
19 "darwin": override_darwin.DarwinConfigurator,
20 "debian": override_debian.DebianConfigurator,
21 "ubuntu": override_debian.DebianConfigurator,
22 "centos": override_centos.CentOSConfigurator,
23 "centos linux": override_centos.CentOSConfigurator,
24 "fedora_old": override_centos.CentOSConfigurator,
25 "fedora": override_fedora.FedoraConfigurator,
26 "ol": override_centos.CentOSConfigurator,
27 "red hat enterprise linux server": override_centos.CentOSConfigurator,
28 "rhel": override_centos.CentOSConfigurator,
29 "amazon": override_centos.CentOSConfigurator,
30 "gentoo": override_gentoo.GentooConfigurator,
31 "gentoo base system": override_gentoo.GentooConfigurator,
32 "opensuse": override_suse.OpenSUSEConfigurator,
33 "suse": override_suse.OpenSUSEConfigurator,
34 }
35
36
37 def get_configurator():
38 """ Get correct configurator class based on the OS fingerprint """
39 os_name, os_version = util.get_os_info()
40 os_name = os_name.lower()
41 override_class = None
42
43 # Special case for older Fedora versions
44 if os_name == 'fedora' and LooseVersion(os_version) < LooseVersion('29'):
45 os_name = 'fedora_old'
46
47 try:
48 override_class = OVERRIDE_CLASSES[os_name]
49 except KeyError:
50 # OS not found in the list
51 os_like = util.get_systemd_os_like()
52 if os_like:
53 for os_name in os_like:
54 if os_name in OVERRIDE_CLASSES.keys():
55 override_class = OVERRIDE_CLASSES[os_name]
56 if not override_class:
57 # No override class found, return the generic configurator
58 override_class = configurator.ApacheConfigurator
59 return override_class
60
61
62 ENTRYPOINT = get_configurator()
63
[end of certbot-apache/certbot_apache/entrypoint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot-apache/certbot_apache/entrypoint.py b/certbot-apache/certbot_apache/entrypoint.py
--- a/certbot-apache/certbot_apache/entrypoint.py
+++ b/certbot-apache/certbot_apache/entrypoint.py
@@ -31,6 +31,8 @@
"gentoo base system": override_gentoo.GentooConfigurator,
"opensuse": override_suse.OpenSUSEConfigurator,
"suse": override_suse.OpenSUSEConfigurator,
+ "scientific": override_centos.CentOSConfigurator,
+ "scientific linux": override_centos.CentOSConfigurator,
}
| {"golden_diff": "diff --git a/certbot-apache/certbot_apache/entrypoint.py b/certbot-apache/certbot_apache/entrypoint.py\n--- a/certbot-apache/certbot_apache/entrypoint.py\n+++ b/certbot-apache/certbot_apache/entrypoint.py\n@@ -31,6 +31,8 @@\n \"gentoo base system\": override_gentoo.GentooConfigurator,\n \"opensuse\": override_suse.OpenSUSEConfigurator,\n \"suse\": override_suse.OpenSUSEConfigurator,\n+ \"scientific\": override_centos.CentOSConfigurator,\n+ \"scientific linux\": override_centos.CentOSConfigurator,\n }\n", "issue": "Certbot's Apache plugin doesn't work on Scientific Linux\nSee https://community.letsencrypt.org/t/noinstallationerror-cannot-find-apache-executable-apache2ctl/97980.\r\n\r\nThis should be fixable by adding an override in https://github.com/certbot/certbot/blob/master/certbot-apache/certbot_apache/entrypoint.py#L17.\n", "before_files": [{"content": "\"\"\" Entry point for Apache Plugin \"\"\"\n# Pylint does not like disutils.version when running inside a venv.\n# See: https://github.com/PyCQA/pylint/issues/73\nfrom distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error\n\nfrom certbot import util\n\nfrom certbot_apache import configurator\nfrom certbot_apache import override_arch\nfrom certbot_apache import override_fedora\nfrom certbot_apache import override_darwin\nfrom certbot_apache import override_debian\nfrom certbot_apache import override_centos\nfrom certbot_apache import override_gentoo\nfrom certbot_apache import override_suse\n\nOVERRIDE_CLASSES = {\n \"arch\": override_arch.ArchConfigurator,\n \"darwin\": override_darwin.DarwinConfigurator,\n \"debian\": override_debian.DebianConfigurator,\n \"ubuntu\": override_debian.DebianConfigurator,\n \"centos\": override_centos.CentOSConfigurator,\n \"centos linux\": override_centos.CentOSConfigurator,\n \"fedora_old\": override_centos.CentOSConfigurator,\n \"fedora\": override_fedora.FedoraConfigurator,\n \"ol\": override_centos.CentOSConfigurator,\n \"red hat enterprise linux server\": override_centos.CentOSConfigurator,\n \"rhel\": override_centos.CentOSConfigurator,\n \"amazon\": override_centos.CentOSConfigurator,\n \"gentoo\": override_gentoo.GentooConfigurator,\n \"gentoo base system\": override_gentoo.GentooConfigurator,\n \"opensuse\": override_suse.OpenSUSEConfigurator,\n \"suse\": override_suse.OpenSUSEConfigurator,\n}\n\n\ndef get_configurator():\n \"\"\" Get correct configurator class based on the OS fingerprint \"\"\"\n os_name, os_version = util.get_os_info()\n os_name = os_name.lower()\n override_class = None\n\n # Special case for older Fedora versions\n if os_name == 'fedora' and LooseVersion(os_version) < LooseVersion('29'):\n os_name = 'fedora_old'\n\n try:\n override_class = OVERRIDE_CLASSES[os_name]\n except KeyError:\n # OS not found in the list\n os_like = util.get_systemd_os_like()\n if os_like:\n for os_name in os_like:\n if os_name in OVERRIDE_CLASSES.keys():\n override_class = OVERRIDE_CLASSES[os_name]\n if not override_class:\n # No override class found, return the generic configurator\n override_class = configurator.ApacheConfigurator\n return override_class\n\n\nENTRYPOINT = get_configurator()\n", "path": "certbot-apache/certbot_apache/entrypoint.py"}]} | 1,340 | 156 |
gh_patches_debug_4216 | rasdani/github-patches | git_diff | great-expectations__great_expectations-4055 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_validator method do not work
Hello!
I have a problem with get_validator component.
Here’s my code:
```
batch_request = BatchRequest(
datasource_name="redshift_",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="daily_chargeback_table_v1", # this is the name of the table you want to retrieve
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
```
I get this exception:
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-67-16f90e0aa558> in <module>
8 )
9 validator = context.get_validator(
---> 10 batch_request=batch_request, expectation_suite_name="test_suite"
11 )
12 print(validator.head())
.
.
.
~/anaconda3/lib/python3.7/site-packages/great_expectations/execution_engine/sqlalchemy_execution_engine.py in _build_selectable_from_batch_spec(self, batch_spec)
979 )
980 .where(
--> 981 sa.and_(
982 split_clause,
983 sampler_fn(**batch_spec["sampling_kwargs"]),
TypeError: table() got an unexpected keyword argument 'schema'
```
My Datasource configuration like:
```
name: redshift_
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
credentials:
host: redshift_host
port: '5443'
username: username
password: password
database: dbname
query:
sslmode: prefer
drivername: postgresql+psycopg2
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetSqlDataConnector
name: whole_table
```
My environment:
MacOS
python 3.7.4
great_expectations 0.13.34
I will be grateful for any help.
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3 import versioneer
4
5 # Parse requirements.txt
6 with open("requirements.txt") as f:
7 required = f.read().splitlines()
8
9 # try:
10 # import pypandoc
11 # long_description = pypandoc.convert_file('README.md', 'rst')
12 # except (IOError, ImportError):
13 long_description = "Always know what to expect from your data. (See https://github.com/great-expectations/great_expectations for full description)."
14
15 config = {
16 "description": "Always know what to expect from your data.",
17 "author": "The Great Expectations Team",
18 "url": "https://github.com/great-expectations/great_expectations",
19 "author_email": "[email protected]",
20 "version": versioneer.get_version(),
21 "cmdclass": versioneer.get_cmdclass(),
22 "install_requires": required,
23 "extras_require": {
24 "spark": ["pyspark>=2.3.2"],
25 "sqlalchemy": ["sqlalchemy>=1.3.16"],
26 "airflow": ["apache-airflow[s3]>=1.9.0", "boto3>=1.7.3"],
27 "gcp": [
28 "google-cloud>=0.34.0",
29 "google-cloud-storage>=1.28.0",
30 "google-cloud-secret-manager>=1.0.0",
31 "pybigquery==0.4.15",
32 ],
33 "redshift": ["psycopg2>=2.8"],
34 "s3": ["boto3>=1.14"],
35 "aws_secrets": ["boto3>=1.8.7"],
36 "azure_secrets": ["azure-identity>=1.0.0", "azure-keyvault-secrets>=4.0.0"],
37 "snowflake": ["snowflake-sqlalchemy>=1.2"],
38 },
39 "packages": find_packages(exclude=["contrib*", "docs*", "tests*", "examples*"]),
40 "entry_points": {
41 "console_scripts": ["great_expectations=great_expectations.cli:main"]
42 },
43 "name": "great_expectations",
44 "long_description": long_description,
45 "license": "Apache-2.0",
46 "keywords": "data science testing pipeline data quality dataquality validation datavalidation",
47 "include_package_data": True,
48 "classifiers": [
49 "Development Status :: 4 - Beta",
50 "Intended Audience :: Developers",
51 "Intended Audience :: Science/Research",
52 "Intended Audience :: Other Audience",
53 "Topic :: Scientific/Engineering",
54 "Topic :: Software Development",
55 "Topic :: Software Development :: Testing",
56 "License :: OSI Approved :: Apache Software License",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.6",
59 "Programming Language :: Python :: 3.7",
60 "Programming Language :: Python :: 3.8",
61 "Programming Language :: Python :: 3.9",
62 ],
63 }
64
65 setup(**config)
66
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
"install_requires": required,
"extras_require": {
"spark": ["pyspark>=2.3.2"],
- "sqlalchemy": ["sqlalchemy>=1.3.16"],
+ "sqlalchemy": ["sqlalchemy>=1.3.18"],
"airflow": ["apache-airflow[s3]>=1.9.0", "boto3>=1.7.3"],
"gcp": [
"google-cloud>=0.34.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \"install_requires\": required,\n \"extras_require\": {\n \"spark\": [\"pyspark>=2.3.2\"],\n- \"sqlalchemy\": [\"sqlalchemy>=1.3.16\"],\n+ \"sqlalchemy\": [\"sqlalchemy>=1.3.18\"],\n \"airflow\": [\"apache-airflow[s3]>=1.9.0\", \"boto3>=1.7.3\"],\n \"gcp\": [\n \"google-cloud>=0.34.0\",\n", "issue": "get_validator method do not work\nHello!\r\nI have a problem with get_validator component.\r\n\r\nHere\u2019s my code:\r\n```\r\nbatch_request = BatchRequest(\r\n datasource_name=\"redshift_\",\r\n data_connector_name=\"default_inferred_data_connector_name\",\r\n data_asset_name=\"daily_chargeback_table_v1\", # this is the name of the table you want to retrieve\r\n)\r\ncontext.create_expectation_suite(\r\n expectation_suite_name=\"test_suite\", overwrite_existing=True\r\n)\r\nvalidator = context.get_validator(\r\n batch_request=batch_request, expectation_suite_name=\"test_suite\"\r\n)\r\nprint(validator.head())\r\n```\r\n\r\nI get this exception:\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-67-16f90e0aa558> in <module>\r\n 8 )\r\n 9 validator = context.get_validator(\r\n---> 10 batch_request=batch_request, expectation_suite_name=\"test_suite\"\r\n 11 )\r\n 12 print(validator.head())\r\n.\r\n.\r\n.\r\n\r\n~/anaconda3/lib/python3.7/site-packages/great_expectations/execution_engine/sqlalchemy_execution_engine.py in _build_selectable_from_batch_spec(self, batch_spec)\r\n 979 )\r\n 980 .where(\r\n--> 981 sa.and_(\r\n 982 split_clause,\r\n 983 sampler_fn(**batch_spec[\"sampling_kwargs\"]),\r\nTypeError: table() got an unexpected keyword argument 'schema'\r\n```\r\n\r\nMy Datasource configuration like:\r\n```\r\nname: redshift_\r\nclass_name: Datasource\r\nexecution_engine:\r\n class_name: SqlAlchemyExecutionEngine\r\n credentials:\r\n host: redshift_host\r\n port: '5443'\r\n username: username\r\n password: password\r\n database: dbname\r\n query:\r\n sslmode: prefer\r\n drivername: postgresql+psycopg2\r\ndata_connectors:\r\n default_runtime_data_connector_name:\r\n class_name: RuntimeDataConnector\r\n batch_identifiers:\r\n - default_identifier_name\r\n default_inferred_data_connector_name:\r\n class_name: InferredAssetSqlDataConnector\r\n name: whole_table\r\n```\r\n\r\nMy environment:\r\nMacOS\r\npython 3.7.4\r\ngreat_expectations 0.13.34\r\n\r\nI will be grateful for any help.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nimport versioneer\n\n# Parse requirements.txt\nwith open(\"requirements.txt\") as f:\n required = f.read().splitlines()\n\n# try:\n# import pypandoc\n# long_description = pypandoc.convert_file('README.md', 'rst')\n# except (IOError, ImportError):\nlong_description = \"Always know what to expect from your data. 
(See https://github.com/great-expectations/great_expectations for full description).\"\n\nconfig = {\n \"description\": \"Always know what to expect from your data.\",\n \"author\": \"The Great Expectations Team\",\n \"url\": \"https://github.com/great-expectations/great_expectations\",\n \"author_email\": \"[email protected]\",\n \"version\": versioneer.get_version(),\n \"cmdclass\": versioneer.get_cmdclass(),\n \"install_requires\": required,\n \"extras_require\": {\n \"spark\": [\"pyspark>=2.3.2\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.3.16\"],\n \"airflow\": [\"apache-airflow[s3]>=1.9.0\", \"boto3>=1.7.3\"],\n \"gcp\": [\n \"google-cloud>=0.34.0\",\n \"google-cloud-storage>=1.28.0\",\n \"google-cloud-secret-manager>=1.0.0\",\n \"pybigquery==0.4.15\",\n ],\n \"redshift\": [\"psycopg2>=2.8\"],\n \"s3\": [\"boto3>=1.14\"],\n \"aws_secrets\": [\"boto3>=1.8.7\"],\n \"azure_secrets\": [\"azure-identity>=1.0.0\", \"azure-keyvault-secrets>=4.0.0\"],\n \"snowflake\": [\"snowflake-sqlalchemy>=1.2\"],\n },\n \"packages\": find_packages(exclude=[\"contrib*\", \"docs*\", \"tests*\", \"examples*\"]),\n \"entry_points\": {\n \"console_scripts\": [\"great_expectations=great_expectations.cli:main\"]\n },\n \"name\": \"great_expectations\",\n \"long_description\": long_description,\n \"license\": \"Apache-2.0\",\n \"keywords\": \"data science testing pipeline data quality dataquality validation datavalidation\",\n \"include_package_data\": True,\n \"classifiers\": [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Other Audience\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Testing\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n}\n\nsetup(**config)\n", "path": "setup.py"}]} | 1,807 | 142 |
gh_patches_debug_2981 | rasdani/github-patches | git_diff | aws__aws-cli-573 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aws ec2 replace-network-acl-entry --protocol ?
How can I specify a protocol? When I specify --protocol tcp or --protocol udp, the command fails:
A client error (InvalidParameterValue) occurred when calling the ReplaceNetworkAclEntry operation: Invalid value 'tcp' for IP protocol. Unknown protocol.
A client error (InvalidParameterValue) occurred when calling the ReplaceNetworkAclEntry operation: Invalid value 'udp' for IP protocol. Unknown protocol.
The command create-network-acl-entry accepts --protocol tcp or --protocol udp.
</issue>
<code>
[start of awscli/customizations/ec2protocolarg.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 This customization allows the user to specify the values "tcp", "udp",
15 or "icmp" as values for the --protocol parameter. The actual Protocol
16 parameter of the operation accepts only integer protocol numbers.
17 """
18
19 def _fix_args(operation, endpoint, params, **kwargs):
20 if 'protocol' in params:
21 if params['protocol'] == 'tcp':
22 params['protocol'] = '6'
23 elif params['protocol'] == 'udp':
24 params['protocol'] = '17'
25 elif params['protocol'] == 'icmp':
26 params['protocol'] = '1'
27 elif params['protocol'] == 'all':
28 params['protocol'] = '-1'
29
30
31 def register_protocol_args(cli):
32 ('before-parameter-build.ec2.RunInstances', _fix_args),
33 cli.register('before-parameter-build.ec2.CreateNetworkAclEntry',
34 _fix_args)
35
36
[end of awscli/customizations/ec2protocolarg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/ec2protocolarg.py b/awscli/customizations/ec2protocolarg.py
--- a/awscli/customizations/ec2protocolarg.py
+++ b/awscli/customizations/ec2protocolarg.py
@@ -29,7 +29,8 @@
def register_protocol_args(cli):
- ('before-parameter-build.ec2.RunInstances', _fix_args),
cli.register('before-parameter-build.ec2.CreateNetworkAclEntry',
_fix_args)
+ cli.register('before-parameter-build.ec2.ReplaceNetworkAclEntry',
+ _fix_args)
| {"golden_diff": "diff --git a/awscli/customizations/ec2protocolarg.py b/awscli/customizations/ec2protocolarg.py\n--- a/awscli/customizations/ec2protocolarg.py\n+++ b/awscli/customizations/ec2protocolarg.py\n@@ -29,7 +29,8 @@\n \n \n def register_protocol_args(cli):\n- ('before-parameter-build.ec2.RunInstances', _fix_args),\n cli.register('before-parameter-build.ec2.CreateNetworkAclEntry',\n _fix_args)\n+ cli.register('before-parameter-build.ec2.ReplaceNetworkAclEntry',\n+ _fix_args)\n", "issue": "aws ec2 replace-network-acl-entry --protocol ?\nHow can I specify a protocol? When I specify --protocol tcp or --protocol udp, the command fails:\n\nA client error (InvalidParameterValue) occurred when calling the ReplaceNetworkAclEntry operation: Invalid value 'tcp' for IP protocol. Unknown protocol.\n\nA client error (InvalidParameterValue) occurred when calling the ReplaceNetworkAclEntry operation: Invalid value 'udp' for IP protocol. Unknown protocol.\n\nThe command create-network-acl-entry accepts --protocol tcp or --protocol udp.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization allows the user to specify the values \"tcp\", \"udp\",\nor \"icmp\" as values for the --protocol parameter. The actual Protocol\nparameter of the operation accepts only integer protocol numbers.\n\"\"\"\n\ndef _fix_args(operation, endpoint, params, **kwargs):\n if 'protocol' in params:\n if params['protocol'] == 'tcp':\n params['protocol'] = '6'\n elif params['protocol'] == 'udp':\n params['protocol'] = '17'\n elif params['protocol'] == 'icmp':\n params['protocol'] = '1'\n elif params['protocol'] == 'all':\n params['protocol'] = '-1'\n\n\ndef register_protocol_args(cli):\n ('before-parameter-build.ec2.RunInstances', _fix_args),\n cli.register('before-parameter-build.ec2.CreateNetworkAclEntry',\n _fix_args)\n \n", "path": "awscli/customizations/ec2protocolarg.py"}]} | 1,038 | 125 |
gh_patches_debug_15624 | rasdani/github-patches | git_diff | saulpw__visidata-1960 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Current HEAD zsh-completion.py needs option_aliases update
**Small description**
`option_aliases` was removed in ce497f444db6d2f3fc0b8309f5ca839196c33c8b but is still referred to in the zsh completion code.
https://github.com/saulpw/visidata/blob/34808745232e798b0f25e893bb444fc9f3c034eb/dev/zsh-completion.py#L11C41-L11C41
I think the script needs a slight rejig to use the (present) `vd` import instead.
I wonder whether this can be included in future CI?
**Expected result**
The command succeeds.
**Actual result**
```
> /build/visidata-src
> Traceback (most recent call last):
> File "/build/visidata-src/dev/zsh-completion.py", line 11, in <module>
> from visidata.main import option_aliases
> ImportError: cannot import name 'option_aliases' from 'visidata.main' (/build/visidata-src/visidata/main.py)
```
**Steps to reproduce**
```
python dev/zsh-completion.py
```
**Additional context**
~~Please include the version of VisiData and Python.~~
https://github.com/saulpw/visidata/tree/34808745232e798b0f25e893bb444fc9f3c034eb but I listed the commit above that causes the breakage — I suspect this is a two minute fix for somebody familiar with the codebase, though not me. I can help with extending CI, though it might just be a case of adding
```yaml
- name: Ensure VisiData can create completions
run: python dev/zsh-completion.py
```
(I guess you might want to run a linter, instead.)
</issue>
<code>
[start of dev/zsh-completion.py]
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3
4 import os
5 from os.path import dirname as dirn
6 import sys
7 import re
8
9 sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
10 from visidata import vd
11 from visidata.main import option_aliases
12
13 ZSH_COMPLETION_FILE = "_visidata"
14 ZSH_COMPLETION_TEMPLATE = "dev/zsh-completion.in"
15 pat_class = re.compile("'(.*)'")
16 pat_select = re.compile("^\([^)]*\)")
17
18
19 def generate_completion(opt):
20 prefix = "--" + opt.name
21 shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]
22 if len(shortnames):
23 if len(shortnames[0]) == 1:
24 shortname = "-" + shortnames[0]
25 else:
26 shortname = "--" + shortnames[0]
27 prefix = "{" + f"{shortname},{prefix}" + "}"
28 if isinstance(opt.value, bool):
29 completion = ""
30 else:
31 completion = ":" + pat_class.findall(str(opt.value.__class__))[0]
32 if opt.name in ["play", "output", "visidata_dir", "config"]:
33 completion += ":_files"
34 elif opt.name in ["plugins_url", "motd_url"]:
35 completion += ":_urls"
36 helpstr = opt.helpstr
37 selections = pat_select.findall(helpstr)
38 if len(selections):
39 completion += f":{selections[0].replace('/', ' ')}"
40 # TODO: use `zstyle ':completion:*' extra-verbose true`
41 # to control the display of default value
42 helpstr = helpstr + f" (default: {opt.value})"
43 helpstr = helpstr.replace("[", "\\[").replace("]", "\\]")
44 return f"{prefix}'[{helpstr}]{completion}'"
45
46
47 flags = [generate_completion(vd._options[opt]["default"]) for opt in vd._options]
48
49 with open(ZSH_COMPLETION_TEMPLATE) as f:
50 template = f.read()
51
52 template = template.replace("{{flags}}", " \\\n ".join(flags))
53
54 with open(ZSH_COMPLETION_FILE, "w") as f:
55 f.write(template)
56
[end of dev/zsh-completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dev/zsh-completion.py b/dev/zsh-completion.py
--- a/dev/zsh-completion.py
+++ b/dev/zsh-completion.py
@@ -8,7 +8,6 @@
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
from visidata import vd
-from visidata.main import option_aliases
ZSH_COMPLETION_FILE = "_visidata"
ZSH_COMPLETION_TEMPLATE = "dev/zsh-completion.in"
@@ -18,7 +17,9 @@
def generate_completion(opt):
prefix = "--" + opt.name
- shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]
+ shortnames = [
+ key for key, value in vd.option_aliases.items() if value[0] == opt.name
+ ]
if len(shortnames):
if len(shortnames[0]) == 1:
shortname = "-" + shortnames[0]
| {"golden_diff": "diff --git a/dev/zsh-completion.py b/dev/zsh-completion.py\n--- a/dev/zsh-completion.py\n+++ b/dev/zsh-completion.py\n@@ -8,7 +8,6 @@\n \n sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))\n from visidata import vd\n-from visidata.main import option_aliases\n \n ZSH_COMPLETION_FILE = \"_visidata\"\n ZSH_COMPLETION_TEMPLATE = \"dev/zsh-completion.in\"\n@@ -18,7 +17,9 @@\n \n def generate_completion(opt):\n prefix = \"--\" + opt.name\n- shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]\n+ shortnames = [\n+ key for key, value in vd.option_aliases.items() if value[0] == opt.name\n+ ]\n if len(shortnames):\n if len(shortnames[0]) == 1:\n shortname = \"-\" + shortnames[0]\n", "issue": "Current HEAD zsh-completion.py needs option_aliases update\n**Small description**\r\n\r\n`option_aliases` was removed in ce497f444db6d2f3fc0b8309f5ca839196c33c8b but is still referred to in the zsh completion code.\r\n\r\nhttps://github.com/saulpw/visidata/blob/34808745232e798b0f25e893bb444fc9f3c034eb/dev/zsh-completion.py#L11C41-L11C41\r\n\r\nI think the script needs a slight rejig to use the (present) `vd` import instead.\r\n\r\nI wonder whether this can be included in future CI?\r\n\r\n**Expected result**\r\n\r\nThe command succeeds.\r\n\r\n**Actual result**\r\n\r\n```\r\n > /build/visidata-src\r\n > Traceback (most recent call last):\r\n > File \"/build/visidata-src/dev/zsh-completion.py\", line 11, in <module>\r\n > from visidata.main import option_aliases\r\n > ImportError: cannot import name 'option_aliases' from 'visidata.main' (/build/visidata-src/visidata/main.py)\r\n```\r\n\r\n**Steps to reproduce**\r\n\r\n```\r\npython dev/zsh-completion.py\r\n```\r\n\r\n**Additional context**\r\n~~Please include the version of VisiData and Python.~~\r\n\r\nhttps://github.com/saulpw/visidata/tree/34808745232e798b0f25e893bb444fc9f3c034eb but I listed the commit above that causes the breakage \u2014 I suspect this is a two minute fix for somebody familiar with the codebase, though not me. 
I can help with extending CI, though it might just be a case of adding\r\n\r\n```yaml\r\n - name: Ensure VisiData can create completions\r\n run: python dev/zsh-completion.py\r\n```\r\n\r\n(I guess you might want to run a linter, instead.)\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\n\nimport os\nfrom os.path import dirname as dirn\nimport sys\nimport re\n\nsys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))\nfrom visidata import vd\nfrom visidata.main import option_aliases\n\nZSH_COMPLETION_FILE = \"_visidata\"\nZSH_COMPLETION_TEMPLATE = \"dev/zsh-completion.in\"\npat_class = re.compile(\"'(.*)'\")\npat_select = re.compile(\"^\\([^)]*\\)\")\n\n\ndef generate_completion(opt):\n prefix = \"--\" + opt.name\n shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]\n if len(shortnames):\n if len(shortnames[0]) == 1:\n shortname = \"-\" + shortnames[0]\n else:\n shortname = \"--\" + shortnames[0]\n prefix = \"{\" + f\"{shortname},{prefix}\" + \"}\"\n if isinstance(opt.value, bool):\n completion = \"\"\n else:\n completion = \":\" + pat_class.findall(str(opt.value.__class__))[0]\n if opt.name in [\"play\", \"output\", \"visidata_dir\", \"config\"]:\n completion += \":_files\"\n elif opt.name in [\"plugins_url\", \"motd_url\"]:\n completion += \":_urls\"\n helpstr = opt.helpstr\n selections = pat_select.findall(helpstr)\n if len(selections):\n completion += f\":{selections[0].replace('/', ' ')}\"\n # TODO: use `zstyle ':completion:*' extra-verbose true`\n # to control the display of default value\n helpstr = helpstr + f\" (default: {opt.value})\"\n helpstr = helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n return f\"{prefix}'[{helpstr}]{completion}'\"\n\n\nflags = [generate_completion(vd._options[opt][\"default\"]) for opt in vd._options]\n\nwith open(ZSH_COMPLETION_TEMPLATE) as f:\n template = f.read()\n\ntemplate = template.replace(\"{{flags}}\", \" \\\\\\n \".join(flags))\n\nwith open(ZSH_COMPLETION_FILE, \"w\") as f:\n f.write(template)\n", "path": "dev/zsh-completion.py"}]} | 1,580 | 218 |
gh_patches_debug_19256 | rasdani/github-patches | git_diff | e2nIEE__pandapower-275 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecation warning for asmatrix
The usage of numpy asmatrix raises Deprecation Warnings in numpy 1.15.4:
PendingDeprecationWarning: the matrix subclass is not the recommended way to represent matrices or deal with linear algebra (see https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). Please adjust your code to use regular ndarray.
There are 5 occurences in pandapower/pf/dSbus_dV_pypower.py.
</issue>
<code>
[start of pandapower/pf/dSbus_dV_pypower.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright 1996-2015 PSERC. All rights reserved.
4 # Use of this source code is governed by a BSD-style
5 # license that can be found in the LICENSE file.
6
7 # Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
8 # and Energy System Technology (IEE), Kassel. All rights reserved.
9
10
11
12 """Computes partial derivatives of power injection w.r.t. voltage.
13 """
14
15 from numpy import conj, diag, asmatrix, asarray, zeros
16 from scipy.sparse import issparse, csr_matrix as sparse
17
18
19 def dSbus_dV(Ybus, V):
20 """Computes partial derivatives of power injection w.r.t. voltage.
21 """
22
23 if issparse(Ybus):
24 return dSbus_dV_sparse(Ybus, V)
25 else:
26 return dSbus_dV_dense(Ybus, V)
27
28
29 def dSbus_dV_sparse(Ybus, V):
30 Ibus = Ybus * V
31 ib = range(len(V))
32 diagV = sparse((V, (ib, ib)))
33 diagIbus = sparse((Ibus, (ib, ib)))
34 diagVnorm = sparse((V / abs(V), (ib, ib)))
35 dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
36 dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)
37 return dS_dVm, dS_dVa
38
39
40 def dSbus_dV_dense(Ybus, V):
41 # standard code from Pypower (slower than above)
42 Ibus = Ybus * asmatrix(V).T
43
44 diagV = asmatrix(diag(V))
45 diagIbus = asmatrix(diag(asarray(Ibus).flatten()))
46 diagVnorm = asmatrix(diag(V / abs(V)))
47
48 dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
49 dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)
50 return dS_dVm, dS_dVa
51
[end of pandapower/pf/dSbus_dV_pypower.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pandapower/pf/dSbus_dV_pypower.py b/pandapower/pf/dSbus_dV_pypower.py
--- a/pandapower/pf/dSbus_dV_pypower.py
+++ b/pandapower/pf/dSbus_dV_pypower.py
@@ -12,7 +12,7 @@
"""Computes partial derivatives of power injection w.r.t. voltage.
"""
-from numpy import conj, diag, asmatrix, asarray, zeros
+from numpy import conj, diag, asarray
from scipy.sparse import issparse, csr_matrix as sparse
@@ -39,11 +39,11 @@
def dSbus_dV_dense(Ybus, V):
# standard code from Pypower (slower than above)
- Ibus = Ybus * asmatrix(V).T
+ Ibus = Ybus * asarray(V).T
- diagV = asmatrix(diag(V))
- diagIbus = asmatrix(diag(asarray(Ibus).flatten()))
- diagVnorm = asmatrix(diag(V / abs(V)))
+ diagV = asarray(diag(V))
+ diagIbus = asarray(diag(asarray(Ibus).flatten()))
+ diagVnorm = asarray(diag(V / abs(V)))
dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)
| {"golden_diff": "diff --git a/pandapower/pf/dSbus_dV_pypower.py b/pandapower/pf/dSbus_dV_pypower.py\n--- a/pandapower/pf/dSbus_dV_pypower.py\n+++ b/pandapower/pf/dSbus_dV_pypower.py\n@@ -12,7 +12,7 @@\n \"\"\"Computes partial derivatives of power injection w.r.t. voltage.\n \"\"\"\n \n-from numpy import conj, diag, asmatrix, asarray, zeros\n+from numpy import conj, diag, asarray\n from scipy.sparse import issparse, csr_matrix as sparse\n \n \n@@ -39,11 +39,11 @@\n \n def dSbus_dV_dense(Ybus, V):\n # standard code from Pypower (slower than above)\n- Ibus = Ybus * asmatrix(V).T\n+ Ibus = Ybus * asarray(V).T\n \n- diagV = asmatrix(diag(V))\n- diagIbus = asmatrix(diag(asarray(Ibus).flatten()))\n- diagVnorm = asmatrix(diag(V / abs(V)))\n+ diagV = asarray(diag(V))\n+ diagIbus = asarray(diag(asarray(Ibus).flatten()))\n+ diagVnorm = asarray(diag(V / abs(V)))\n \n dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm\n dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)\n", "issue": "Deprecation warning for asmatrix\nThe usage of numpy asmatrix raises Deprecation Warnings in numpy 1.15.4:\r\nPendingDeprecationWarning: the matrix subclass is not the recommended way to represent matrices or deal with linear algebra (see https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). Please adjust your code to use regular ndarray.\r\nThere are 5 occurences in pandapower/pf/dSbus_dV_pypower.py.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 1996-2015 PSERC. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\n\n\"\"\"Computes partial derivatives of power injection w.r.t. voltage.\n\"\"\"\n\nfrom numpy import conj, diag, asmatrix, asarray, zeros\nfrom scipy.sparse import issparse, csr_matrix as sparse\n\n\ndef dSbus_dV(Ybus, V):\n \"\"\"Computes partial derivatives of power injection w.r.t. voltage.\n \"\"\"\n\n if issparse(Ybus):\n return dSbus_dV_sparse(Ybus, V)\n else:\n return dSbus_dV_dense(Ybus, V)\n\n\ndef dSbus_dV_sparse(Ybus, V):\n Ibus = Ybus * V\n ib = range(len(V))\n diagV = sparse((V, (ib, ib)))\n diagIbus = sparse((Ibus, (ib, ib)))\n diagVnorm = sparse((V / abs(V), (ib, ib)))\n dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm\n dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)\n return dS_dVm, dS_dVa\n\n\ndef dSbus_dV_dense(Ybus, V):\n # standard code from Pypower (slower than above)\n Ibus = Ybus * asmatrix(V).T\n\n diagV = asmatrix(diag(V))\n diagIbus = asmatrix(diag(asarray(Ibus).flatten()))\n diagVnorm = asmatrix(diag(V / abs(V)))\n\n dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm\n dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)\n return dS_dVm, dS_dVa\n", "path": "pandapower/pf/dSbus_dV_pypower.py"}]} | 1,254 | 350 |
gh_patches_debug_25881 | rasdani/github-patches | git_diff | translate__pootle-4060 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
merge_user should also remove the old user
Currently `merge_user` does not actually remove the old user. You need to run `purge_user` following `merge_user` to completely remove the user. This is dangerous and error prone, especially on older instances that have a large number of users.
</issue>
<code>
[start of pootle/apps/accounts/management/commands/merge_user.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import accounts
11
12 from . import UserCommand
13
14
15 class Command(UserCommand):
16 args = "user other_user"
17 help = "Merge user to other_user"
18
19 def handle(self, *args, **kwargs):
20 super(Command, self).handle(*args, **kwargs)
21 accounts.utils.UserMerger(self.get_user(username=args[0]),
22 self.get_user(username=args[1])).merge()
23
[end of pootle/apps/accounts/management/commands/merge_user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/accounts/management/commands/merge_user.py b/pootle/apps/accounts/management/commands/merge_user.py
--- a/pootle/apps/accounts/management/commands/merge_user.py
+++ b/pootle/apps/accounts/management/commands/merge_user.py
@@ -7,6 +7,8 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+from optparse import make_option
+
import accounts
from . import UserCommand
@@ -15,8 +17,22 @@
class Command(UserCommand):
args = "user other_user"
help = "Merge user to other_user"
+ shared_option_list = (
+ make_option("--no-delete",
+ dest='delete',
+ action="store_false",
+ default=True,
+ help="Don't delete user after merging."),
+ )
+ option_list = UserCommand.option_list + shared_option_list
def handle(self, *args, **kwargs):
super(Command, self).handle(*args, **kwargs)
- accounts.utils.UserMerger(self.get_user(username=args[0]),
+ src_user = self.get_user(username=args[0])
+ accounts.utils.UserMerger(src_user,
self.get_user(username=args[1])).merge()
+
+ if kwargs.get("delete"):
+ self.stdout.write("Deleting user: %s...\n" % src_user.username)
+ src_user.delete()
+ self.stdout.write("User deleted: %s\n" % src_user.username)
| {"golden_diff": "diff --git a/pootle/apps/accounts/management/commands/merge_user.py b/pootle/apps/accounts/management/commands/merge_user.py\n--- a/pootle/apps/accounts/management/commands/merge_user.py\n+++ b/pootle/apps/accounts/management/commands/merge_user.py\n@@ -7,6 +7,8 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+from optparse import make_option\n+\n import accounts\n \n from . import UserCommand\n@@ -15,8 +17,22 @@\n class Command(UserCommand):\n args = \"user other_user\"\n help = \"Merge user to other_user\"\n+ shared_option_list = (\n+ make_option(\"--no-delete\",\n+ dest='delete',\n+ action=\"store_false\",\n+ default=True,\n+ help=\"Don't delete user after merging.\"),\n+ )\n+ option_list = UserCommand.option_list + shared_option_list\n \n def handle(self, *args, **kwargs):\n super(Command, self).handle(*args, **kwargs)\n- accounts.utils.UserMerger(self.get_user(username=args[0]),\n+ src_user = self.get_user(username=args[0])\n+ accounts.utils.UserMerger(src_user,\n self.get_user(username=args[1])).merge()\n+\n+ if kwargs.get(\"delete\"):\n+ self.stdout.write(\"Deleting user: %s...\\n\" % src_user.username)\n+ src_user.delete()\n+ self.stdout.write(\"User deleted: %s\\n\" % src_user.username)\n", "issue": "merge_user should also remove the old user\nCurrently `merge_user` does not actually remove the old user. You need to run `purge_user` following `merge_user` to completely remove the user. This is dangerous and error prone, especially on older instances that have a large number of users.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport accounts\n\nfrom . import UserCommand\n\n\nclass Command(UserCommand):\n args = \"user other_user\"\n help = \"Merge user to other_user\"\n\n def handle(self, *args, **kwargs):\n super(Command, self).handle(*args, **kwargs)\n accounts.utils.UserMerger(self.get_user(username=args[0]),\n self.get_user(username=args[1])).merge()\n", "path": "pootle/apps/accounts/management/commands/merge_user.py"}]} | 814 | 344 |
gh_patches_debug_41748 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support pushing docker image built by elasticdl client.
to run docker images in cloud environment, we need add step for pushing docker image to registry provided by cloud.
</issue>
<code>
[start of elasticdl/client/client.py]
1 import argparse
2 import os
3 import inspect
4 import tempfile
5 import time
6 import getpass
7 import sys
8 from string import Template
9 import docker
10 import yaml
11 from kubernetes.client.apis import core_v1_api
12 from kubernetes import config
13
14
15 def _m_file_in_docker(model_file):
16 return "/model/" + os.path.basename(model_file)
17
18 def _build_docker_image(
19 m_file, timestamp, image_base="elasticdl:dev"
20 ):
21 DOCKER_TEMPLATE = """
22 FROM {}
23 COPY {} {}
24 """
25
26 with tempfile.NamedTemporaryFile(mode="w+", delete=False) as df:
27 df.write(DOCKER_TEMPLATE.format(image_base, m_file, _m_file_in_docker(m_file)))
28
29 client = docker.APIClient(base_url="unix://var/run/docker.sock")
30 for line in client.build(
31 dockerfile=df.name, path=".", rm=True, tag="elasticdl:dev_" + str(timestamp)
32 ):
33 print(str(line, encoding="utf-8"))
34
35 # TODO: upload docker image to docker hub.
36
37 def _gen_master_def(model_file, argv, timestamp):
38 master_yaml = """
39 apiVersion: v1
40 kind: Pod
41 metadata:
42 name: elasticdl-master-{timestamp}
43 labels:
44 purpose: test-command
45 spec:
46 containers:
47 - name: elasticdl-master-{timestamp}
48 image: elasticdl:dev_{timestamp}
49 command: ["python"]
50 args: [
51 "-m", "elasticdl.master.main",
52 "--worker_image", "elasticdl:dev_{timestamp}",
53 "--model_file", "{m_file}"
54 ]
55 imagePullPolicy: Never
56 env:
57 - name: MY_POD_IP
58 valueFrom:
59 fieldRef:
60 fieldPath: status.podIP
61 restartPolicy: Never
62 """ .format(m_file=_m_file_in_docker(model_file), timestamp=timestamp)
63
64 master_def = yaml.safe_load(master_yaml)
65
66 # Build master arguments
67 master_def['spec']['containers'][0]['args'].extend(argv)
68 return master_def
69
70 def _submit(model_file, argv, timestamp):
71 master_def = _gen_master_def(model_file, argv, timestamp)
72 config.load_kube_config()
73 api = core_v1_api.CoreV1Api()
74 resp = api.create_namespaced_pod(body=master_def, namespace="default")
75 print("Master launched. status='%s'" % str(resp.status))
76
77 def main():
78 parser = argparse.ArgumentParser(description="ElasticDL Client")
79 # Rewrite model_file argument and pass all other arguments to master.
80 parser.add_argument("--model_file", help="Path to Model file", required=True)
81 args, argv = parser.parse_known_args()
82
83 timestamp = int(round(time.time() * 1000))
84 _build_docker_image(args.model_file, timestamp)
85 _submit(args.model_file, argv, timestamp)
86
87
88 if __name__ == "__main__":
89 main()
90
[end of elasticdl/client/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/client/client.py b/elasticdl/client/client.py
--- a/elasticdl/client/client.py
+++ b/elasticdl/client/client.py
@@ -16,7 +16,8 @@
return "/model/" + os.path.basename(model_file)
def _build_docker_image(
- m_file, timestamp, image_base="elasticdl:dev"
+ m_file, image_name, image_base="elasticdl:dev",
+ repository=None
):
DOCKER_TEMPLATE = """
FROM {}
@@ -28,13 +29,15 @@
client = docker.APIClient(base_url="unix://var/run/docker.sock")
for line in client.build(
- dockerfile=df.name, path=".", rm=True, tag="elasticdl:dev_" + str(timestamp)
+ dockerfile=df.name, path=".", rm=True, tag=image_name
):
print(str(line, encoding="utf-8"))
- # TODO: upload docker image to docker hub.
+ if repository != None:
+ for line in client.push(image_name, stream=True, decode=True):
+ print(line)
-def _gen_master_def(model_file, argv, timestamp):
+def _gen_master_def(image_name, model_file, argv, timestamp):
master_yaml = """
apiVersion: v1
kind: Pod
@@ -45,21 +48,21 @@
spec:
containers:
- name: elasticdl-master-{timestamp}
- image: elasticdl:dev_{timestamp}
+ image: {image_name}
command: ["python"]
args: [
"-m", "elasticdl.master.main",
- "--worker_image", "elasticdl:dev_{timestamp}",
+ "--worker_image", {image_name},
"--model_file", "{m_file}"
]
- imagePullPolicy: Never
+ imagePullPolicy: IfNotPresent
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
restartPolicy: Never
-""" .format(m_file=_m_file_in_docker(model_file), timestamp=timestamp)
+""" .format(m_file=_m_file_in_docker(model_file), image_name=image_name, timestamp=timestamp)
master_def = yaml.safe_load(master_yaml)
@@ -67,8 +70,8 @@
master_def['spec']['containers'][0]['args'].extend(argv)
return master_def
-def _submit(model_file, argv, timestamp):
- master_def = _gen_master_def(model_file, argv, timestamp)
+def _submit(image_name, model_file, argv, timestamp):
+ master_def = _gen_master_def(image_name, model_file, argv, timestamp)
config.load_kube_config()
api = core_v1_api.CoreV1Api()
resp = api.create_namespaced_pod(body=master_def, namespace="default")
@@ -78,11 +81,15 @@
parser = argparse.ArgumentParser(description="ElasticDL Client")
# Rewrite model_file argument and pass all other arguments to master.
parser.add_argument("--model_file", help="Path to Model file", required=True)
+ parser.add_argument("--image-base", help="Base image containing elasticdl runtime environment.")
+ parser.add_argument("--repository", help="The repository to push docker image to.")
args, argv = parser.parse_known_args()
- timestamp = int(round(time.time() * 1000))
- _build_docker_image(args.model_file, timestamp)
- _submit(args.model_file, argv, timestamp)
+ timestamp = str(int(round(time.time() * 1000)))
+ image_name = args.image_base + '_' + timestamp
+ _build_docker_image(args.model_file, image_name, image_base=args.image_base,
+ repository=args.repository)
+ _submit(image_name, args.model_file, argv, timestamp)
if __name__ == "__main__":
| {"golden_diff": "diff --git a/elasticdl/client/client.py b/elasticdl/client/client.py\n--- a/elasticdl/client/client.py\n+++ b/elasticdl/client/client.py\n@@ -16,7 +16,8 @@\n return \"/model/\" + os.path.basename(model_file)\n \n def _build_docker_image(\n- m_file, timestamp, image_base=\"elasticdl:dev\"\n+ m_file, image_name, image_base=\"elasticdl:dev\",\n+ repository=None\n ):\n DOCKER_TEMPLATE = \"\"\"\n FROM {}\n@@ -28,13 +29,15 @@\n \n client = docker.APIClient(base_url=\"unix://var/run/docker.sock\")\n for line in client.build(\n- dockerfile=df.name, path=\".\", rm=True, tag=\"elasticdl:dev_\" + str(timestamp)\n+ dockerfile=df.name, path=\".\", rm=True, tag=image_name\n ):\n print(str(line, encoding=\"utf-8\"))\n \n- # TODO: upload docker image to docker hub.\n+ if repository != None:\n+ for line in client.push(image_name, stream=True, decode=True):\n+ print(line)\n \n-def _gen_master_def(model_file, argv, timestamp):\n+def _gen_master_def(image_name, model_file, argv, timestamp):\n master_yaml = \"\"\"\n apiVersion: v1\n kind: Pod\n@@ -45,21 +48,21 @@\n spec:\n containers:\n - name: elasticdl-master-{timestamp}\n- image: elasticdl:dev_{timestamp}\n+ image: {image_name}\n command: [\"python\"]\n args: [\n \"-m\", \"elasticdl.master.main\",\n- \"--worker_image\", \"elasticdl:dev_{timestamp}\",\n+ \"--worker_image\", {image_name},\n \"--model_file\", \"{m_file}\"\n ]\n- imagePullPolicy: Never\n+ imagePullPolicy: IfNotPresent \n env:\n - name: MY_POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n restartPolicy: Never\n-\"\"\" .format(m_file=_m_file_in_docker(model_file), timestamp=timestamp)\n+\"\"\" .format(m_file=_m_file_in_docker(model_file), image_name=image_name, timestamp=timestamp)\n \n master_def = yaml.safe_load(master_yaml)\n \n@@ -67,8 +70,8 @@\n master_def['spec']['containers'][0]['args'].extend(argv)\n return master_def\n \n-def _submit(model_file, argv, timestamp):\n- master_def = _gen_master_def(model_file, argv, timestamp)\n+def _submit(image_name, model_file, argv, timestamp):\n+ master_def = _gen_master_def(image_name, model_file, argv, timestamp)\n config.load_kube_config()\n api = core_v1_api.CoreV1Api()\n resp = api.create_namespaced_pod(body=master_def, namespace=\"default\")\n@@ -78,11 +81,15 @@\n parser = argparse.ArgumentParser(description=\"ElasticDL Client\")\n # Rewrite model_file argument and pass all other arguments to master.\n parser.add_argument(\"--model_file\", help=\"Path to Model file\", required=True)\n+ parser.add_argument(\"--image-base\", help=\"Base image containing elasticdl runtime environment.\")\n+ parser.add_argument(\"--repository\", help=\"The repository to push docker image to.\")\n args, argv = parser.parse_known_args()\n \n- timestamp = int(round(time.time() * 1000))\n- _build_docker_image(args.model_file, timestamp)\n- _submit(args.model_file, argv, timestamp) \n+ timestamp = str(int(round(time.time() * 1000)))\n+ image_name = args.image_base + '_' + timestamp \n+ _build_docker_image(args.model_file, image_name, image_base=args.image_base,\n+ repository=args.repository)\n+ _submit(image_name, args.model_file, argv, timestamp)\n \n \n if __name__ == \"__main__\":\n", "issue": "Support pushing docker image built by elasticdl client.\nto run docker images in cloud environment, we need add step for pushing docker image to registry provided by cloud.\n", "before_files": [{"content": "import argparse\nimport os\nimport inspect\nimport tempfile\nimport time\nimport getpass\nimport sys\nfrom string import Template\nimport 
docker\nimport yaml\nfrom kubernetes.client.apis import core_v1_api\nfrom kubernetes import config\n\n\ndef _m_file_in_docker(model_file):\n return \"/model/\" + os.path.basename(model_file)\n\ndef _build_docker_image(\n m_file, timestamp, image_base=\"elasticdl:dev\"\n):\n DOCKER_TEMPLATE = \"\"\"\nFROM {}\nCOPY {} {}\n\"\"\"\n\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False) as df:\n df.write(DOCKER_TEMPLATE.format(image_base, m_file, _m_file_in_docker(m_file)))\n\n client = docker.APIClient(base_url=\"unix://var/run/docker.sock\")\n for line in client.build(\n dockerfile=df.name, path=\".\", rm=True, tag=\"elasticdl:dev_\" + str(timestamp)\n ):\n print(str(line, encoding=\"utf-8\"))\n\n # TODO: upload docker image to docker hub.\n\ndef _gen_master_def(model_file, argv, timestamp):\n master_yaml = \"\"\"\napiVersion: v1\nkind: Pod\nmetadata:\n name: elasticdl-master-{timestamp}\n labels:\n purpose: test-command\nspec:\n containers:\n - name: elasticdl-master-{timestamp}\n image: elasticdl:dev_{timestamp}\n command: [\"python\"]\n args: [\n \"-m\", \"elasticdl.master.main\",\n \"--worker_image\", \"elasticdl:dev_{timestamp}\",\n \"--model_file\", \"{m_file}\"\n ]\n imagePullPolicy: Never\n env:\n - name: MY_POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n restartPolicy: Never\n\"\"\" .format(m_file=_m_file_in_docker(model_file), timestamp=timestamp)\n\n master_def = yaml.safe_load(master_yaml)\n\n # Build master arguments\n master_def['spec']['containers'][0]['args'].extend(argv)\n return master_def\n\ndef _submit(model_file, argv, timestamp):\n master_def = _gen_master_def(model_file, argv, timestamp)\n config.load_kube_config()\n api = core_v1_api.CoreV1Api()\n resp = api.create_namespaced_pod(body=master_def, namespace=\"default\")\n print(\"Master launched. status='%s'\" % str(resp.status))\n\ndef main():\n parser = argparse.ArgumentParser(description=\"ElasticDL Client\")\n # Rewrite model_file argument and pass all other arguments to master.\n parser.add_argument(\"--model_file\", help=\"Path to Model file\", required=True)\n args, argv = parser.parse_known_args()\n\n timestamp = int(round(time.time() * 1000))\n _build_docker_image(args.model_file, timestamp)\n _submit(args.model_file, argv, timestamp) \n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/client/client.py"}]} | 1,364 | 857 |
gh_patches_debug_15372 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1269 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pwndbg.gdblib.arch.current is wrong if executed in a pwntools gdbscript command
TL;DR: This should not fail, but does so, because the `patch` command fetches `pwndbg.gdblib.arch.current` which for some reason is wrong/not updated.
```py
from pwn import *
gdbscript = '''
tbreak main
patch $rip 'xor rax, rax'
continue
'''
p = gdb.debug('/bin/ls', gdbscript=gdbscript)
p.interactive()
```
Stacktrace:
```
Traceback (most recent call last):
File "/home/dc/tools/pwndbg/pwndbg/commands/__init__.py", line 145, in __call__
return self.function(*args, **kwargs)
File "/home/dc/tools/pwndbg/pwndbg/commands/__init__.py", line 216, in _OnlyWhenRunning
return function(*a, **kw)
File "/home/dc/tools/pwndbg/pwndbg/commands/patch.py", line 25, in patch
new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)
File "/home/dc/.virtualenvs/pwn/lib/python3.8/site-packages/pwnlib/context/__init__.py", line 1444, in setter
raise AttributeError("Invalid arch/bits combination: %s/%s" % (arch, bits))
AttributeError: Invalid arch/bits combination: i386/64
If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues
(Please don't forget to search if it hasn't been reported before)
To generate the report and open a browser, you may run `bugreport --run-browser`
PS: Pull requests are welcome
> /home/dc/.virtualenvs/pwn/lib/python3.8/site-packages/pwnlib/context/__init__.py(1444)setter()
-> raise AttributeError("Invalid arch/bits combination: %s/%s" % (arch, bits))
(Pdb) p arch, bits
('i386', 64)
(Pdb)
```
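For context, `asm()` is wrapped by pwntools' `LocalContext`, and the `setter` frame in the traceback is its arch/bits validation. A minimal sketch of the mismatch outside of gdb (hypothetical values; assumes a recent pwntools):

```py
from pwn import context
from pwnlib.asm import asm

context.bits = 64                  # as set up by gdb.debug for an amd64 target
asm("xor eax, eax", arch="i386")   # stale value from pwndbg.gdblib.arch.current
# -> AttributeError: Invalid arch/bits combination: i386/64
```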
</issue>
<code>
[start of pwndbg/commands/patch.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import argparse
4
5 from pwnlib.asm import asm
6 from pwnlib.asm import disasm
7
8 import pwndbg.color.message as message
9 import pwndbg.commands
10 import pwndbg.gdblib.memory
11 import pwndbg.lib.memoize
12
13 # Keep old patches made so we can revert them
14 patches = {}
15
16
17 parser = argparse.ArgumentParser(description="Patches given instruction with given code or bytes")
18 parser.add_argument("address", type=int, help="The address to patch")
19 parser.add_argument("ins", type=str, help="instruction[s]")
20
21
22 @pwndbg.commands.ArgparsedCommand(parser)
23 @pwndbg.commands.OnlyWhenRunning
24 def patch(address, ins):
25 new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)
26
27 old_mem = pwndbg.gdblib.memory.read(address, len(new_mem))
28
29 patches[address] = (old_mem, new_mem)
30
31 pwndbg.gdblib.memory.write(address, new_mem)
32
33 pwndbg.lib.memoize.reset()
34
35
36 parser2 = argparse.ArgumentParser(description="Revert patch at given address")
37 parser2.add_argument("address", type=int, help="Address to revert patch on")
38
39
40 @pwndbg.commands.ArgparsedCommand(parser2)
41 @pwndbg.commands.OnlyWhenRunning
42 def patch_revert(address):
43 if not patches:
44 print(message.notice("No patches to revert"))
45 return
46
47 if address == -1:
48 for addr, (old, _new) in patches.items():
49 pwndbg.gdblib.memory.write(addr, old)
50 print(message.notice("Reverted patch at %#x" % addr))
51 patches.clear()
52 else:
53 old, _new = patches[address]
54 pwndbg.gdblib.memory.write(address, old)
55
56 pwndbg.lib.memoize.reset()
57
58
59 parser3 = argparse.ArgumentParser(description="List all patches")
60
61
62 @pwndbg.commands.ArgparsedCommand(parser3)
63 @pwndbg.commands.OnlyWhenRunning
64 def patch_list():
65 if not patches:
66 print(message.hint("No patches to list"))
67 return
68
69 print(message.hint("Patches:"))
70 for addr, (old, new) in patches.items():
71 old_insns = disasm(old, arch=pwndbg.gdblib.arch.current)
72 new_insns = disasm(new, arch=pwndbg.gdblib.arch.current)
73
74 print(
75 message.hint("Patch at"),
76 message.warning("%#x:" % addr),
77 message.hint("from"),
78 message.warning(old_insns.replace("\n", "; ")),
79 message.hint("to"),
80 message.warning(new_insns.replace("\n", "; ")),
81 )
82
[end of pwndbg/commands/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/commands/patch.py b/pwndbg/commands/patch.py
--- a/pwndbg/commands/patch.py
+++ b/pwndbg/commands/patch.py
@@ -22,7 +22,7 @@
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def patch(address, ins):
- new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)
+ new_mem = asm(ins)
old_mem = pwndbg.gdblib.memory.read(address, len(new_mem))
@@ -68,8 +68,8 @@
print(message.hint("Patches:"))
for addr, (old, new) in patches.items():
- old_insns = disasm(old, arch=pwndbg.gdblib.arch.current)
- new_insns = disasm(new, arch=pwndbg.gdblib.arch.current)
+ old_insns = disasm(old)
+ new_insns = disasm(new)
print(
message.hint("Patch at"),
| {"golden_diff": "diff --git a/pwndbg/commands/patch.py b/pwndbg/commands/patch.py\n--- a/pwndbg/commands/patch.py\n+++ b/pwndbg/commands/patch.py\n@@ -22,7 +22,7 @@\n @pwndbg.commands.ArgparsedCommand(parser)\n @pwndbg.commands.OnlyWhenRunning\n def patch(address, ins):\n- new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)\n+ new_mem = asm(ins)\n \n old_mem = pwndbg.gdblib.memory.read(address, len(new_mem))\n \n@@ -68,8 +68,8 @@\n \n print(message.hint(\"Patches:\"))\n for addr, (old, new) in patches.items():\n- old_insns = disasm(old, arch=pwndbg.gdblib.arch.current)\n- new_insns = disasm(new, arch=pwndbg.gdblib.arch.current)\n+ old_insns = disasm(old)\n+ new_insns = disasm(new)\n \n print(\n message.hint(\"Patch at\"),\n", "issue": "pwndbg.gdblib.arch.current is wrong if executed in a pwntools gdbscript command\nTL;DR: This should not fail, but does so, because the `patch` command fetches `pwndbg.gdblib.arch.current` which for some reason is wrong/not updated.\r\n\r\n\r\n```py\r\nfrom pwn import *\r\n\r\ngdbscript = '''\r\ntbreak main\r\npatch $rip 'xor rax, rax'\r\ncontinue\r\n'''\r\n\r\np = gdb.debug('/bin/ls', gdbscript=gdbscript)\r\n\r\np.interactive()\r\n```\r\n\r\nStacktrace:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/dc/tools/pwndbg/pwndbg/commands/__init__.py\", line 145, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/dc/tools/pwndbg/pwndbg/commands/__init__.py\", line 216, in _OnlyWhenRunning\r\n return function(*a, **kw)\r\n File \"/home/dc/tools/pwndbg/pwndbg/commands/patch.py\", line 25, in patch\r\n new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)\r\n File \"/home/dc/.virtualenvs/pwn/lib/python3.8/site-packages/pwnlib/context/__init__.py\", line 1444, in setter\r\n raise AttributeError(\"Invalid arch/bits combination: %s/%s\" % (arch, bits))\r\nAttributeError: Invalid arch/bits combination: i386/64\r\n\r\nIf that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\r\n(Please don't forget to search if it hasn't been reported before)\r\nTo generate the report and open a browser, you may run `bugreport --run-browser`\r\nPS: Pull requests are welcome\r\n> /home/dc/.virtualenvs/pwn/lib/python3.8/site-packages/pwnlib/context/__init__.py(1444)setter()\r\n-> raise AttributeError(\"Invalid arch/bits combination: %s/%s\" % (arch, bits))\r\n(Pdb) p arch, bits\r\n('i386', 64)\r\n(Pdb) \r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport argparse\n\nfrom pwnlib.asm import asm\nfrom pwnlib.asm import disasm\n\nimport pwndbg.color.message as message\nimport pwndbg.commands\nimport pwndbg.gdblib.memory\nimport pwndbg.lib.memoize\n\n# Keep old patches made so we can revert them\npatches = {}\n\n\nparser = argparse.ArgumentParser(description=\"Patches given instruction with given code or bytes\")\nparser.add_argument(\"address\", type=int, help=\"The address to patch\")\nparser.add_argument(\"ins\", type=str, help=\"instruction[s]\")\n\n\[email protected](parser)\[email protected]\ndef patch(address, ins):\n new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)\n\n old_mem = pwndbg.gdblib.memory.read(address, len(new_mem))\n\n patches[address] = (old_mem, new_mem)\n\n pwndbg.gdblib.memory.write(address, new_mem)\n\n pwndbg.lib.memoize.reset()\n\n\nparser2 = argparse.ArgumentParser(description=\"Revert patch at given address\")\nparser2.add_argument(\"address\", type=int, help=\"Address to revert patch on\")\n\n\[email protected](parser2)\[email protected]\ndef 
patch_revert(address):\n if not patches:\n print(message.notice(\"No patches to revert\"))\n return\n\n if address == -1:\n for addr, (old, _new) in patches.items():\n pwndbg.gdblib.memory.write(addr, old)\n print(message.notice(\"Reverted patch at %#x\" % addr))\n patches.clear()\n else:\n old, _new = patches[address]\n pwndbg.gdblib.memory.write(address, old)\n\n pwndbg.lib.memoize.reset()\n\n\nparser3 = argparse.ArgumentParser(description=\"List all patches\")\n\n\[email protected](parser3)\[email protected]\ndef patch_list():\n if not patches:\n print(message.hint(\"No patches to list\"))\n return\n\n print(message.hint(\"Patches:\"))\n for addr, (old, new) in patches.items():\n old_insns = disasm(old, arch=pwndbg.gdblib.arch.current)\n new_insns = disasm(new, arch=pwndbg.gdblib.arch.current)\n\n print(\n message.hint(\"Patch at\"),\n message.warning(\"%#x:\" % addr),\n message.hint(\"from\"),\n message.warning(old_insns.replace(\"\\n\", \"; \")),\n message.hint(\"to\"),\n message.warning(new_insns.replace(\"\\n\", \"; \")),\n )\n", "path": "pwndbg/commands/patch.py"}]} | 1,759 | 237 |
gh_patches_debug_15569 | rasdani/github-patches | git_diff | lightly-ai__lightly-215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ignore lightly outputs when creating a dataset
When working on a folder of images, e.g.
```
dataset/
L image_1.jpg
L image_2.jpg
L image_3.jpg
```
the following commands will not work when in the dataset directory:
```
lightly-embed input_dir=./
lightly-train input_dir=./
lightly-magic input_dir=./
```
This is because the command-line tool creates a directory `lightly_outputs` where logs and results are stored. However, when creating the `LightlyDataset`, this directory will be interpreted as a subfolder with images in it which leads to an error. We can handle this by ignoring the `lightly_outputs` directory.
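A sketch of the filtering idea (the directory-name check is the only assumption; the fix below applies it inside `_contains_subdirs`):

```py
import os

def _is_lightly_output_dir(dirname: str) -> bool:
    # The CLI writes logs and results into a "lightly_outputs" directory.
    return 'lightly_outputs' in dirname

def _list_images_dir(root: str):
    # Drop lightly_outputs before deciding whether root has image subfolders.
    return [f for f in os.listdir(root) if not _is_lightly_output_dir(f)]
```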
</issue>
<code>
[start of lightly/data/_helpers.py]
1 """ Helper Functions """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import os
7 from torchvision import datasets
8
9 from lightly.data._image import DatasetFolder
10
11 try:
12 from lightly.data._video import VideoDataset
13 VIDEO_DATASET_AVAILABLE = True
14 except Exception as e:
15 VIDEO_DATASET_AVAILABLE = False
16 VIDEO_DATASET_ERRORMSG = e
17
18
19 IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp',
20 '.pgm', '.tif', '.tiff', '.webp')
21
22 VIDEO_EXTENSIONS = ('.mp4', '.mov', '.avi')
23
24
25 def _contains_videos(root: str, extensions: tuple):
26 """Checks whether directory contains video files.
27
28 Args:
29 root: Root directory path.
30
31 Returns:
32         True if root contains video files else false.
33 """
34 list_dir = os.listdir(root)
35 is_video = \
36 [f.lower().endswith(extensions) for f in list_dir]
37 return any(is_video)
38
39
40 def _contains_subdirs(root: str):
41 """Checks whether directory contains subdirectories.
42
43 Args:
44 root: Root directory path.
45
46 Returns:
47 True if root contains subdirectories else false.
48
49 """
50 list_dir = os.listdir(root)
51 is_dir = \
52 [os.path.isdir(os.path.join(root, f)) for f in list_dir]
53 return any(is_dir)
54
55
56 def _load_dataset_from_folder(root: str, transform):
57 """Initializes dataset from folder.
58
59 Args:
60 root: (str) Root directory path
61 transform: (torchvision.transforms.Compose) image transformations
62
63 Returns:
64 Dataset consisting of images in the root directory.
65
66 """
67
68 # if there is a video in the input directory but we do not have
69 # the right dependencies, raise a ValueError
70 contains_videos = _contains_videos(root, VIDEO_EXTENSIONS)
71 if contains_videos and not VIDEO_DATASET_AVAILABLE:
72 raise ValueError(f'The input directory {root} contains videos '
73 'but the VideoDataset is not available. \n'
74 'Make sure you have installed the right '
75 'dependencies. The error from the imported '
76 f'module was: {VIDEO_DATASET_ERRORMSG}')
77
78 if contains_videos:
79 # root contains videos -> create a video dataset
80 dataset = VideoDataset(root,
81 extensions=VIDEO_EXTENSIONS,
82 transform=transform)
83 elif _contains_subdirs(root):
84 # root contains subdirectories -> create an image folder dataset
85 dataset = datasets.ImageFolder(root,
86 transform=transform)
87 else:
88 # root contains plain images -> create a folder dataset
89 dataset = DatasetFolder(root,
90 extensions=IMG_EXTENSIONS,
91 transform=transform)
92
93 return dataset
94
95
96 def _load_dataset(input_dir: str,
97 transform=None):
98 """Initializes dataset from torchvision or from folder.
99
100 Args:
101 root: (str) Directory where dataset is stored
102 name: (str) Name of the dataset (e.g. cifar10, cifar100)
103 train: (bool) Use the training set
104 download: (bool) Download the dataset
105 transform: (torchvision.transforms.Compose) image transformations
106 from_folder: (str) Path to directory holding the images to load.
107
108 Returns:
109 A torchvision dataset
110
111 Raises:
112 ValueError: If the specified dataset doesn't exist
113
114 """
115
116 if not os.path.exists(input_dir):
117 raise ValueError(f'The input directory {input_dir} does not exist!')
118
119 return _load_dataset_from_folder(input_dir, transform)
120
[end of lightly/data/_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/data/_helpers.py b/lightly/data/_helpers.py
--- a/lightly/data/_helpers.py
+++ b/lightly/data/_helpers.py
@@ -37,6 +37,19 @@
return any(is_video)
+def _is_lightly_output_dir(dirname: str):
+ """Checks whether the directory is a lightly_output directory.
+
+ Args:
+ dirname: Directory to check.
+
+ Returns:
+ True if dirname is "lightly_outputs" else false.
+
+ """
+ return 'lightly_outputs' in dirname
+
+
def _contains_subdirs(root: str):
"""Checks whether directory contains subdirectories.
@@ -48,6 +61,7 @@
"""
list_dir = os.listdir(root)
+ list_dir = list(filter(lambda x: not _is_lightly_output_dir(x), list_dir))
is_dir = \
[os.path.isdir(os.path.join(root, f)) for f in list_dir]
return any(is_dir)
| {"golden_diff": "diff --git a/lightly/data/_helpers.py b/lightly/data/_helpers.py\n--- a/lightly/data/_helpers.py\n+++ b/lightly/data/_helpers.py\n@@ -37,6 +37,19 @@\n return any(is_video)\n \n \n+def _is_lightly_output_dir(dirname: str):\n+ \"\"\"Checks whether the directory is a lightly_output directory.\n+\n+ Args:\n+ dirname: Directory to check.\n+\n+ Returns:\n+ True if dirname is \"lightly_outputs\" else false.\n+\n+ \"\"\"\n+ return 'lightly_outputs' in dirname\n+\n+\n def _contains_subdirs(root: str):\n \"\"\"Checks whether directory contains subdirectories.\n \n@@ -48,6 +61,7 @@\n \n \"\"\"\n list_dir = os.listdir(root)\n+ list_dir = list(filter(lambda x: not _is_lightly_output_dir(x), list_dir))\n is_dir = \\\n [os.path.isdir(os.path.join(root, f)) for f in list_dir]\n return any(is_dir)\n", "issue": "Ignore lightly outputs when creating a dataset\nWhen working on a folder of images, e.g.\r\n```\r\ndataset/\r\nL image_1.jpg\r\nL image_2.jpg\r\nL image_3.jpg\r\n```\r\nthe following commands will not work when in the dataset directory:\r\n```\r\nlightly-embed input_dir=./\r\nlightly-train input_dir=./\r\nlightly-magic input_dir=./\r\n```\r\n\r\nThis is because the command-line tool creates a directory `lightly_outputs` where logs and results are stored. However, when creating the `LightlyDataset`, this directory will be interpreted as a subfolder with images in it which leads to an error. We can handle this by ignoring the `lightly_outputs` directory.\n", "before_files": [{"content": "\"\"\" Helper Functions \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\nfrom torchvision import datasets\n\nfrom lightly.data._image import DatasetFolder\n\ntry:\n from lightly.data._video import VideoDataset\n VIDEO_DATASET_AVAILABLE = True\nexcept Exception as e:\n VIDEO_DATASET_AVAILABLE = False\n VIDEO_DATASET_ERRORMSG = e\n\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp',\n '.pgm', '.tif', '.tiff', '.webp')\n\nVIDEO_EXTENSIONS = ('.mp4', '.mov', '.avi')\n\n\ndef _contains_videos(root: str, extensions: tuple):\n \"\"\"Checks whether directory contains video files.\n\n Args:\n root: Root directory path.\n\n Returns:\n True if root contains subdirectories else false.\n \"\"\"\n list_dir = os.listdir(root)\n is_video = \\\n [f.lower().endswith(extensions) for f in list_dir]\n return any(is_video)\n\n\ndef _contains_subdirs(root: str):\n \"\"\"Checks whether directory contains subdirectories.\n\n Args:\n root: Root directory path.\n\n Returns:\n True if root contains subdirectories else false.\n\n \"\"\"\n list_dir = os.listdir(root)\n is_dir = \\\n [os.path.isdir(os.path.join(root, f)) for f in list_dir]\n return any(is_dir)\n\n\ndef _load_dataset_from_folder(root: str, transform):\n \"\"\"Initializes dataset from folder.\n\n Args:\n root: (str) Root directory path\n transform: (torchvision.transforms.Compose) image transformations\n\n Returns:\n Dataset consisting of images in the root directory.\n\n \"\"\"\n\n # if there is a video in the input directory but we do not have\n # the right dependencies, raise a ValueError\n contains_videos = _contains_videos(root, VIDEO_EXTENSIONS)\n if contains_videos and not VIDEO_DATASET_AVAILABLE:\n raise ValueError(f'The input directory {root} contains videos '\n 'but the VideoDataset is not available. \\n'\n 'Make sure you have installed the right '\n 'dependencies. 
The error from the imported '\n f'module was: {VIDEO_DATASET_ERRORMSG}')\n\n if contains_videos:\n # root contains videos -> create a video dataset\n dataset = VideoDataset(root,\n extensions=VIDEO_EXTENSIONS,\n transform=transform)\n elif _contains_subdirs(root):\n # root contains subdirectories -> create an image folder dataset\n dataset = datasets.ImageFolder(root,\n transform=transform)\n else:\n # root contains plain images -> create a folder dataset\n dataset = DatasetFolder(root,\n extensions=IMG_EXTENSIONS,\n transform=transform)\n\n return dataset\n\n\ndef _load_dataset(input_dir: str,\n transform=None):\n \"\"\"Initializes dataset from torchvision or from folder.\n\n Args:\n root: (str) Directory where dataset is stored\n name: (str) Name of the dataset (e.g. cifar10, cifar100)\n train: (bool) Use the training set\n download: (bool) Download the dataset\n transform: (torchvision.transforms.Compose) image transformations\n from_folder: (str) Path to directory holding the images to load.\n\n Returns:\n A torchvision dataset\n\n Raises:\n ValueError: If the specified dataset doesn't exist\n\n \"\"\"\n\n if not os.path.exists(input_dir):\n raise ValueError(f'The input directory {input_dir} does not exist!')\n\n return _load_dataset_from_folder(input_dir, transform)\n", "path": "lightly/data/_helpers.py"}]} | 1,709 | 222 |
gh_patches_debug_15140 | rasdani/github-patches | git_diff | uccser__cs-unplugged-1381 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Plugging it in area is not accessible on dev server
This link should work: https://cs-unplugged-dev.appspot.com/plugging-it-in/
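The route is currently registered only when `settings.DJANGO_PRODUCTION` is false; a sketch of gating it on an explicit deployment flag instead (mirroring the env-based check in the fix, inside `config/urls.py`):

```py
import environ

env = environ.Env()

# Expose plugging-it-in everywhere except the production deployment.
if not env("DEPLOYMENT", default=None) == "prod":
    urlpatterns += [
        path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),
    ]
```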
</issue>
<code>
[start of csunplugged/config/urls.py]
1 """URL configuration for the Django system.
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/dev/topics/http/urls/
5 """
6
7 from django.conf import settings
8 from django.urls import include, path
9 from django.conf.urls.i18n import i18n_patterns
10 from django.contrib import admin
11
12 urlpatterns = i18n_patterns(
13 path('', include('general.urls', namespace='general')),
14 path('topics/', include('topics.urls', namespace='topics')),
15 path('resources/', include('resources.urls', namespace='resources')),
16 path('at-home/', include('at_home.urls', namespace='at_home')),
17 )
18
19 urlpatterns += [
20 path('', include('classic.urls')),
21 path('en/search/', include('search.urls', namespace='search')),
22 path('admin/', admin.site.urls),
23 ]
24
25 if not settings.DJANGO_PRODUCTION:
26 urlpatterns += [
27 path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),
28 ]
29
30 if settings.DEBUG: # pragma: no cover
31 import debug_toolbar
32 urlpatterns += [
33 path('__debug__/', include(debug_toolbar.urls)),
34 ]
35 urlpatterns += i18n_patterns(
36 path('__dev__/', include('dev.urls', namespace='dev')),
37 )
38 # These patterns allows these error pages to be debugged during development.
39 from django.views import defaults
40 urlpatterns += [
41 path('400/', defaults.bad_request, kwargs={'exception': Exception('Bad request')}),
42 path('403/', defaults.permission_denied, kwargs={'exception': Exception('Permissin denied')}),
43 path('404/', defaults.page_not_found, kwargs={'exception': Exception('Page not found')}),
44 path('500/', defaults.server_error),
45 ]
46
[end of csunplugged/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py
--- a/csunplugged/config/urls.py
+++ b/csunplugged/config/urls.py
@@ -8,6 +8,8 @@
from django.urls import include, path
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
+import environ
+env = environ.Env()
urlpatterns = i18n_patterns(
path('', include('general.urls', namespace='general')),
@@ -22,7 +24,7 @@
path('admin/', admin.site.urls),
]
-if not settings.DJANGO_PRODUCTION:
+if not env("DEPLOYMENT", default=None) == "prod":
urlpatterns += [
path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),
]
| {"golden_diff": "diff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py\n--- a/csunplugged/config/urls.py\n+++ b/csunplugged/config/urls.py\n@@ -8,6 +8,8 @@\n from django.urls import include, path\n from django.conf.urls.i18n import i18n_patterns\n from django.contrib import admin\n+import environ\n+env = environ.Env()\n \n urlpatterns = i18n_patterns(\n path('', include('general.urls', namespace='general')),\n@@ -22,7 +24,7 @@\n path('admin/', admin.site.urls),\n ]\n \n-if not settings.DJANGO_PRODUCTION:\n+if not env(\"DEPLOYMENT\", default=None) == \"prod\":\n urlpatterns += [\n path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),\n ]\n", "issue": "Plugging it in area is not accessible on dev server\nThis link should work: https://cs-unplugged-dev.appspot.com/plugging-it-in/\n", "before_files": [{"content": "\"\"\"URL configuration for the Django system.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/dev/topics/http/urls/\n\"\"\"\n\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\n\nurlpatterns = i18n_patterns(\n path('', include('general.urls', namespace='general')),\n path('topics/', include('topics.urls', namespace='topics')),\n path('resources/', include('resources.urls', namespace='resources')),\n path('at-home/', include('at_home.urls', namespace='at_home')),\n)\n\nurlpatterns += [\n path('', include('classic.urls')),\n path('en/search/', include('search.urls', namespace='search')),\n path('admin/', admin.site.urls),\n]\n\nif not settings.DJANGO_PRODUCTION:\n urlpatterns += [\n path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),\n ]\n\nif settings.DEBUG: # pragma: no cover\n import debug_toolbar\n urlpatterns += [\n path('__debug__/', include(debug_toolbar.urls)),\n ]\n urlpatterns += i18n_patterns(\n path('__dev__/', include('dev.urls', namespace='dev')),\n )\n # These patterns allows these error pages to be debugged during development.\n from django.views import defaults\n urlpatterns += [\n path('400/', defaults.bad_request, kwargs={'exception': Exception('Bad request')}),\n path('403/', defaults.permission_denied, kwargs={'exception': Exception('Permissin denied')}),\n path('404/', defaults.page_not_found, kwargs={'exception': Exception('Page not found')}),\n path('500/', defaults.server_error),\n ]\n", "path": "csunplugged/config/urls.py"}]} | 1,041 | 192 |
gh_patches_debug_23993 | rasdani/github-patches | git_diff | sanic-org__sanic-2640 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
There is an obvious bug in ASGI WebsocketConnection of Sanic
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
I started my sanic app with UvicornWorker. The original websocket will become a WebsocketConnection. When I call
the `ws.recv` function, it reports an error if bytes data is received at this time:
`KeyError: 'text'`
https://github.com/sanic-org/sanic/blob/main/sanic/server/websockets/connection.py
```py
async def recv(self, *args, **kwargs) -> Optional[str]:
    message = await self._receive()

    if message["type"] == "websocket.receive":
        return message["text"]
    elif message["type"] == "websocket.disconnect":
        pass

    return None
```
Messages of bytes type are not handled here.
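A sketch of a `recv` that also accepts bytes frames (decoding to `str` is just one possible choice):

```py
async def recv(self, *args, **kwargs) -> Optional[str]:
    message = await self._receive()

    if message["type"] == "websocket.receive":
        if "text" in message:
            return message["text"]
        if "bytes" in message:
            return message["bytes"].decode()  # ASGI delivers either key
    elif message["type"] == "websocket.disconnect":
        pass

    return None
```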
### Code snippet
_No response_
### Expected Behavior
_No response_
### How do you run Sanic?
ASGI
### Operating System
ubuntu
### Sanic Version
22.3
### Additional context
_No response_
</issue>
<code>
[start of sanic/server/websockets/connection.py]
1 from typing import (
2 Any,
3 Awaitable,
4 Callable,
5 Dict,
6 List,
7 MutableMapping,
8 Optional,
9 Union,
10 )
11
12
13 ASIMessage = MutableMapping[str, Any]
14
15
16 class WebSocketConnection:
17 """
18 This is for ASGI Connections.
19 It provides an interface similar to WebsocketProtocol, but
20 sends/receives over an ASGI connection.
21 """
22
23 # TODO
24 # - Implement ping/pong
25
26 def __init__(
27 self,
28 send: Callable[[ASIMessage], Awaitable[None]],
29 receive: Callable[[], Awaitable[ASIMessage]],
30 subprotocols: Optional[List[str]] = None,
31 ) -> None:
32 self._send = send
33 self._receive = receive
34 self._subprotocols = subprotocols or []
35
36 async def send(self, data: Union[str, bytes], *args, **kwargs) -> None:
37 message: Dict[str, Union[str, bytes]] = {"type": "websocket.send"}
38
39 if isinstance(data, bytes):
40 message.update({"bytes": data})
41 else:
42 message.update({"text": str(data)})
43
44 await self._send(message)
45
46 async def recv(self, *args, **kwargs) -> Optional[str]:
47 message = await self._receive()
48
49 if message["type"] == "websocket.receive":
50 return message["text"]
51 elif message["type"] == "websocket.disconnect":
52 pass
53
54 return None
55
56 receive = recv
57
58 async def accept(self, subprotocols: Optional[List[str]] = None) -> None:
59 subprotocol = None
60 if subprotocols:
61 for subp in subprotocols:
62 if subp in self.subprotocols:
63 subprotocol = subp
64 break
65
66 await self._send(
67 {
68 "type": "websocket.accept",
69 "subprotocol": subprotocol,
70 }
71 )
72
73 async def close(self, code: int = 1000, reason: str = "") -> None:
74 pass
75
76 @property
77 def subprotocols(self):
78 return self._subprotocols
79
80 @subprotocols.setter
81 def subprotocols(self, subprotocols: Optional[List[str]] = None):
82 self._subprotocols = subprotocols or []
83
[end of sanic/server/websockets/connection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/server/websockets/connection.py b/sanic/server/websockets/connection.py
--- a/sanic/server/websockets/connection.py
+++ b/sanic/server/websockets/connection.py
@@ -9,8 +9,10 @@
Union,
)
+from sanic.exceptions import InvalidUsage
-ASIMessage = MutableMapping[str, Any]
+
+ASGIMessage = MutableMapping[str, Any]
class WebSocketConnection:
@@ -25,8 +27,8 @@
def __init__(
self,
- send: Callable[[ASIMessage], Awaitable[None]],
- receive: Callable[[], Awaitable[ASIMessage]],
+ send: Callable[[ASGIMessage], Awaitable[None]],
+ receive: Callable[[], Awaitable[ASGIMessage]],
subprotocols: Optional[List[str]] = None,
) -> None:
self._send = send
@@ -47,7 +49,13 @@
message = await self._receive()
if message["type"] == "websocket.receive":
- return message["text"]
+ try:
+ return message["text"]
+ except KeyError:
+ try:
+ return message["bytes"].decode()
+ except KeyError:
+ raise InvalidUsage("Bad ASGI message received")
elif message["type"] == "websocket.disconnect":
pass
| {"golden_diff": "diff --git a/sanic/server/websockets/connection.py b/sanic/server/websockets/connection.py\n--- a/sanic/server/websockets/connection.py\n+++ b/sanic/server/websockets/connection.py\n@@ -9,8 +9,10 @@\n Union,\n )\n \n+from sanic.exceptions import InvalidUsage\n \n-ASIMessage = MutableMapping[str, Any]\n+\n+ASGIMessage = MutableMapping[str, Any]\n \n \n class WebSocketConnection:\n@@ -25,8 +27,8 @@\n \n def __init__(\n self,\n- send: Callable[[ASIMessage], Awaitable[None]],\n- receive: Callable[[], Awaitable[ASIMessage]],\n+ send: Callable[[ASGIMessage], Awaitable[None]],\n+ receive: Callable[[], Awaitable[ASGIMessage]],\n subprotocols: Optional[List[str]] = None,\n ) -> None:\n self._send = send\n@@ -47,7 +49,13 @@\n message = await self._receive()\n \n if message[\"type\"] == \"websocket.receive\":\n- return message[\"text\"]\n+ try:\n+ return message[\"text\"]\n+ except KeyError:\n+ try:\n+ return message[\"bytes\"].decode()\n+ except KeyError:\n+ raise InvalidUsage(\"Bad ASGI message received\")\n elif message[\"type\"] == \"websocket.disconnect\":\n pass\n", "issue": "There is an obvious bug in ASGI WebsocketConnection of Sanic\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe the bug\n\nI started my sanic app with UvicornWorker. The original websocket will become WebsocketConnection. When I call\r\nthe ws.recv function will report an error if bytes data is received at this time.\r\n`KeyError\uff1a\u2018text\u2019`\r\n[https://github.com/sanic-org/sanic/blob/main/sanic/server/websockets/connection.py](url)\r\n` async def recv(self, *args, **kwargs) -> Optional[str]:\r\n message = await self._receive()\r\n\r\n if message[\"type\"] == \"websocket.receive\":\r\n return message[\"text\"]\r\n elif message[\"type\"] == \"websocket.disconnect\":\r\n pass\r\n\r\n return None`\r\nThere is no data of bytes type processed here.\n\n### Code snippet\n\n_No response_\n\n### Expected Behavior\n\n_No response_\n\n### How do you run Sanic?\n\nASGI\n\n### Operating System\n\nubuntu\n\n### Sanic Version\n\n22.3\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from typing import (\n Any,\n Awaitable,\n Callable,\n Dict,\n List,\n MutableMapping,\n Optional,\n Union,\n)\n\n\nASIMessage = MutableMapping[str, Any]\n\n\nclass WebSocketConnection:\n \"\"\"\n This is for ASGI Connections.\n It provides an interface similar to WebsocketProtocol, but\n sends/receives over an ASGI connection.\n \"\"\"\n\n # TODO\n # - Implement ping/pong\n\n def __init__(\n self,\n send: Callable[[ASIMessage], Awaitable[None]],\n receive: Callable[[], Awaitable[ASIMessage]],\n subprotocols: Optional[List[str]] = None,\n ) -> None:\n self._send = send\n self._receive = receive\n self._subprotocols = subprotocols or []\n\n async def send(self, data: Union[str, bytes], *args, **kwargs) -> None:\n message: Dict[str, Union[str, bytes]] = {\"type\": \"websocket.send\"}\n\n if isinstance(data, bytes):\n message.update({\"bytes\": data})\n else:\n message.update({\"text\": str(data)})\n\n await self._send(message)\n\n async def recv(self, *args, **kwargs) -> Optional[str]:\n message = await self._receive()\n\n if message[\"type\"] == \"websocket.receive\":\n return message[\"text\"]\n elif message[\"type\"] == \"websocket.disconnect\":\n pass\n\n return None\n\n receive = recv\n\n async def accept(self, subprotocols: Optional[List[str]] = None) -> None:\n subprotocol = None\n if subprotocols:\n for subp in subprotocols:\n if subp in 
self.subprotocols:\n subprotocol = subp\n break\n\n await self._send(\n {\n \"type\": \"websocket.accept\",\n \"subprotocol\": subprotocol,\n }\n )\n\n async def close(self, code: int = 1000, reason: str = \"\") -> None:\n pass\n\n @property\n def subprotocols(self):\n return self._subprotocols\n\n @subprotocols.setter\n def subprotocols(self, subprotocols: Optional[List[str]] = None):\n self._subprotocols = subprotocols or []\n", "path": "sanic/server/websockets/connection.py"}]} | 1,413 | 299 |
gh_patches_debug_32229 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-922 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Modify contact_us api to fetch name and email, if the user is logged in.
This issue is related to #853, as discussed with @deshraj.
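A sketch of the lookup (names follow the existing view; the patch itself uses a broader bare `except`):

```py
try:
    user = User.objects.get(username=request.user)
    request_data = {'name': user.username, 'email': user.email}
except User.DoesNotExist:
    request_data = request.data  # fall back to whatever the client sent
```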
</issue>
<code>
[start of apps/web/views.py]
1 from django.contrib.auth.models import User
2 from django.shortcuts import render
3
4 from .models import Team
5
6 from rest_framework import permissions, status
7 from rest_framework.decorators import (api_view,
8 permission_classes,
9 throttle_classes,)
10 from rest_framework.response import Response
11 from rest_framework.throttling import AnonRateThrottle
12
13 from .serializers import ContactSerializer, TeamSerializer
14
15
16 def home(request, template_name="index.html"):
17 """
18 Home Page View
19 """
20 return render(request, template_name)
21
22
23 def page_not_found(request):
24 response = render(request, 'error404.html',
25 )
26 response.status_code = 404
27 return response
28
29
30 def internal_server_error(request):
31 response = render(request, 'error500.html',
32 )
33 response.status_code = 500
34 return response
35
36
37 @throttle_classes([AnonRateThrottle, ])
38 @api_view(['POST', ])
39 @permission_classes((permissions.AllowAny,))
40 def contact_us(request):
41 try:
42 user = User.objects.get(username=request.user)
43 name = user.username
44 email = user.email
45 request_data = {"name": name, "email": email}
46 request_data['message'] = request.data['message']
47 serializer = ContactSerializer(data=request_data)
48 if serializer.is_valid():
49 serializer.save()
50 response_data = {'message': 'Your message has been successfully recorded. We will contact you shortly.'}
51 return Response(response_data, status=status.HTTP_201_CREATED)
52 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
53 except:
54 serializer = ContactSerializer(data=request.data)
55 if serializer.is_valid():
56 serializer.save()
57 response_data = {'message': 'Your message has been successfully recorded. We will contact you shortly.'}
58 return Response(response_data, status=status.HTTP_201_CREATED)
59 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
60
61
62 @throttle_classes([AnonRateThrottle])
63 @api_view(['GET', 'POST'])
64 @permission_classes((permissions.AllowAny,))
65 def our_team(request):
66 if request.method == 'GET':
67 teams = Team.objects.all()
68 serializer = TeamSerializer(teams, many=True, context={'request': request})
69 response_data = serializer.data
70 return Response(response_data, status=status.HTTP_200_OK)
71 elif request.method == 'POST':
72 # team_type is set to Team.CONTRIBUTOR by default and can be overridden by the requester
73 request.data['team_type'] = request.data.get('team_type', Team.CONTRIBUTOR)
74 serializer = TeamSerializer(data=request.data)
75 if serializer.is_valid():
76 serializer.save()
77 response_data = {'message', 'Successfully added the contributor.'}
78 return Response(response_data, status=status.HTTP_201_CREATED)
79 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
80
[end of apps/web/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/web/views.py b/apps/web/views.py
--- a/apps/web/views.py
+++ b/apps/web/views.py
@@ -35,29 +35,33 @@
@throttle_classes([AnonRateThrottle, ])
-@api_view(['POST', ])
+@api_view(['GET', 'POST'])
@permission_classes((permissions.AllowAny,))
def contact_us(request):
+ user_does_not_exist = False
try:
user = User.objects.get(username=request.user)
name = user.username
email = user.email
- request_data = {"name": name, "email": email}
- request_data['message'] = request.data['message']
- serializer = ContactSerializer(data=request_data)
- if serializer.is_valid():
- serializer.save()
- response_data = {'message': 'Your message has been successfully recorded. We will contact you shortly.'}
- return Response(response_data, status=status.HTTP_201_CREATED)
- return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
+ request_data = {'name': name, 'email': email}
except:
- serializer = ContactSerializer(data=request.data)
+ request_data = request.data
+ user_does_not_exist = True
+
+ if request.method == 'POST' or user_does_not_exist:
+ if request.POST.get('message'):
+ request_data['message'] = request.POST.get('message')
+ serializer = ContactSerializer(data=request_data)
if serializer.is_valid():
serializer.save()
- response_data = {'message': 'Your message has been successfully recorded. We will contact you shortly.'}
+ response_data = {'message': 'We have received your request and will contact you shortly.'}
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
+ elif request.method == 'GET':
+ response_data = {"name": name, "email": email}
+ return Response(response_data, status=status.HTTP_200_OK)
+
@throttle_classes([AnonRateThrottle])
@api_view(['GET', 'POST'])
| {"golden_diff": "diff --git a/apps/web/views.py b/apps/web/views.py\n--- a/apps/web/views.py\n+++ b/apps/web/views.py\n@@ -35,29 +35,33 @@\n \n \n @throttle_classes([AnonRateThrottle, ])\n-@api_view(['POST', ])\n+@api_view(['GET', 'POST'])\n @permission_classes((permissions.AllowAny,))\n def contact_us(request):\n+ user_does_not_exist = False\n try:\n user = User.objects.get(username=request.user)\n name = user.username\n email = user.email\n- request_data = {\"name\": name, \"email\": email}\n- request_data['message'] = request.data['message']\n- serializer = ContactSerializer(data=request_data)\n- if serializer.is_valid():\n- serializer.save()\n- response_data = {'message': 'Your message has been successfully recorded. We will contact you shortly.'}\n- return Response(response_data, status=status.HTTP_201_CREATED)\n- return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n+ request_data = {'name': name, 'email': email}\n except:\n- serializer = ContactSerializer(data=request.data)\n+ request_data = request.data\n+ user_does_not_exist = True\n+\n+ if request.method == 'POST' or user_does_not_exist:\n+ if request.POST.get('message'):\n+ request_data['message'] = request.POST.get('message')\n+ serializer = ContactSerializer(data=request_data)\n if serializer.is_valid():\n serializer.save()\n- response_data = {'message': 'Your message has been successfully recorded. We will contact you shortly.'}\n+ response_data = {'message': 'We have received your request and will contact you shortly.'}\n return Response(response_data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n+ elif request.method == 'GET':\n+ response_data = {\"name\": name, \"email\": email}\n+ return Response(response_data, status=status.HTTP_200_OK)\n+\n \n @throttle_classes([AnonRateThrottle])\n @api_view(['GET', 'POST'])\n", "issue": "Modify contact_us api to fetch name and email, if the user is logged in.\nThis issue is related to #853 as discussed with @deshraj .\n", "before_files": [{"content": "from django.contrib.auth.models import User\nfrom django.shortcuts import render\n\nfrom .models import Team\n\nfrom rest_framework import permissions, status\nfrom rest_framework.decorators import (api_view,\n permission_classes,\n throttle_classes,)\nfrom rest_framework.response import Response\nfrom rest_framework.throttling import AnonRateThrottle\n\nfrom .serializers import ContactSerializer, TeamSerializer\n\n\ndef home(request, template_name=\"index.html\"):\n \"\"\"\n Home Page View\n \"\"\"\n return render(request, template_name)\n\n\ndef page_not_found(request):\n response = render(request, 'error404.html',\n )\n response.status_code = 404\n return response\n\n\ndef internal_server_error(request):\n response = render(request, 'error500.html',\n )\n response.status_code = 500\n return response\n\n\n@throttle_classes([AnonRateThrottle, ])\n@api_view(['POST', ])\n@permission_classes((permissions.AllowAny,))\ndef contact_us(request):\n try:\n user = User.objects.get(username=request.user)\n name = user.username\n email = user.email\n request_data = {\"name\": name, \"email\": email}\n request_data['message'] = request.data['message']\n serializer = ContactSerializer(data=request_data)\n if serializer.is_valid():\n serializer.save()\n response_data = {'message': 'Your message has been successfully recorded. 
We will contact you shortly.'}\n return Response(response_data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n except:\n serializer = ContactSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n response_data = {'message': 'Your message has been successfully recorded. We will contact you shortly.'}\n return Response(response_data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@throttle_classes([AnonRateThrottle])\n@api_view(['GET', 'POST'])\n@permission_classes((permissions.AllowAny,))\ndef our_team(request):\n if request.method == 'GET':\n teams = Team.objects.all()\n serializer = TeamSerializer(teams, many=True, context={'request': request})\n response_data = serializer.data\n return Response(response_data, status=status.HTTP_200_OK)\n elif request.method == 'POST':\n # team_type is set to Team.CONTRIBUTOR by default and can be overridden by the requester\n request.data['team_type'] = request.data.get('team_type', Team.CONTRIBUTOR)\n serializer = TeamSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n response_data = {'message', 'Successfully added the contributor.'}\n return Response(response_data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n", "path": "apps/web/views.py"}]} | 1,337 | 469 |
gh_patches_debug_49168 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2800 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jinja2 3.1.0 breaks mkdocs
since the jinja2 3.1.0 release mkdocs does not work any more:
```
admin@host ui % pip install jinja2==3.1.0
Collecting jinja2==3.1.0
Using cached Jinja2-3.1.0-py3-none-any.whl (132 kB)
Installing collected packages: jinja2
Attempting uninstall: jinja2
Found existing installation: Jinja2 3.0.0
Uninstalling Jinja2-3.0.0:
Successfully uninstalled Jinja2-3.0.0
Successfully installed jinja2-3.1.0
admin@host ui % mkdocs build
Traceback (most recent call last):
File "/usr/local/bin/mkdocs", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.9/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.9/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/mkdocs/__main__.py", line 187, in build_command
build.build(config.load_config(**kwargs), dirty=not clean)
File "/usr/local/lib/python3.9/site-packages/mkdocs/config/base.py", line 216, in load_config
from mkdocs.config.defaults import get_schema
File "/usr/local/lib/python3.9/site-packages/mkdocs/config/defaults.py", line 1, in <module>
from mkdocs.config import config_options
File "/usr/local/lib/python3.9/site-packages/mkdocs/config/config_options.py", line 8, in <module>
from mkdocs import utils, theme, plugins
File "/usr/local/lib/python3.9/site-packages/mkdocs/theme.py", line 6, in <module>
from mkdocs.utils import filters
File "/usr/local/lib/python3.9/site-packages/mkdocs/utils/filters.py", line 13, in <module>
@jinja2.contextfilter
AttributeError: module 'jinja2' has no attribute 'contextfilter'
```
However, if I install jinja2 3.0.0:
```
admin@host ui % pip install jinja2==3.0.0
Collecting jinja2==3.0.0
Using cached Jinja2-3.0.0-py3-none-any.whl (133 kB)
Requirement already satisfied: MarkupSafe>=2.0.0rc2 in /usr/local/lib/python3.9/site-packages (from jinja2==3.0.0) (2.1.1)
Installing collected packages: jinja2
Attempting uninstall: jinja2
Found existing installation: Jinja2 3.1.0
Uninstalling Jinja2-3.1.0:
Successfully uninstalled Jinja2-3.1.0
Successfully installed jinja2-3.0.0
admin@host ui % mkdocs build
INFO - Cleaning site directory
INFO - Building documentation to directory: /Users/admin/git/searchlab/ui/site
INFO - Documentation built in 0.33 seconds
```
- mkdocs can be patched by explicitly installing jinja2 3.0.0.
- maybe this is not a mkdocs bug, but a jinja2 bug; however, this could be patched in mkdocs as well.
Prevent error with Jinja2 v3.1
Fixes #2794
See [Jinja2 v3.0.0 changes](https://jinja.palletsprojects.com/en/3.0.x/changes/#version-3-0-0):
> The function and filter decorators have been renamed and unified. The old names are deprecated...
>
> - `pass_context` replaces `contextfunction` and `contextfilter`.
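That rename suggests a small compatibility shim in `mkdocs/utils/filters.py`; a sketch (mirroring the patch below):

```py
try:
    from jinja2 import pass_context as contextfilter  # Jinja2 >= 3.0
except ImportError:
    from jinja2 import contextfilter  # Jinja2 < 3.0
```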
</issue>
<code>
[start of mkdocs/utils/filters.py]
1 import jinja2
2
3 from mkdocs.utils import normalize_url
4
5
6 @jinja2.contextfilter
7 def url_filter(context, value):
8 """ A Template filter to normalize URLs. """
9 return normalize_url(value, page=context['page'], base=context['base_url'])
10
[end of mkdocs/utils/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/utils/filters.py b/mkdocs/utils/filters.py
--- a/mkdocs/utils/filters.py
+++ b/mkdocs/utils/filters.py
@@ -1,9 +1,12 @@
-import jinja2
+try:
+ from jinja2 import pass_context as contextfilter
+except ImportError:
+ from jinja2 import contextfilter
from mkdocs.utils import normalize_url
[email protected]
+@contextfilter
def url_filter(context, value):
""" A Template filter to normalize URLs. """
return normalize_url(value, page=context['page'], base=context['base_url'])
| {"golden_diff": "diff --git a/mkdocs/utils/filters.py b/mkdocs/utils/filters.py\n--- a/mkdocs/utils/filters.py\n+++ b/mkdocs/utils/filters.py\n@@ -1,9 +1,12 @@\n-import jinja2\n+try:\n+ from jinja2 import pass_context as contextfilter\n+except ImportError:\n+ from jinja2 import contextfilter\n \n from mkdocs.utils import normalize_url\n \n \[email protected]\n+@contextfilter\n def url_filter(context, value):\n \"\"\" A Template filter to normalize URLs. \"\"\"\n return normalize_url(value, page=context['page'], base=context['base_url'])\n", "issue": "jinja2 3.1.0 breaks mkdocs\nsince the jinja2 3.1.0 release mkdocs does not work any more:\r\n\r\n```\r\nadmin@host ui % pip install jinja2==3.1.0\r\nCollecting jinja2==3.1.0\r\n Using cached Jinja2-3.1.0-py3-none-any.whl (132 kB)\r\nInstalling collected packages: jinja2\r\n Attempting uninstall: jinja2\r\n Found existing installation: Jinja2 3.0.0\r\n Uninstalling Jinja2-3.0.0:\r\n Successfully uninstalled Jinja2-3.0.0\r\nSuccessfully installed jinja2-3.1.0\r\nadmin@host ui % mkdocs build\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/mkdocs\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/usr/local/lib/python3.9/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.9/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.9/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.9/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/mkdocs/__main__.py\", line 187, in build_command\r\n build.build(config.load_config(**kwargs), dirty=not clean)\r\n File \"/usr/local/lib/python3.9/site-packages/mkdocs/config/base.py\", line 216, in load_config\r\n from mkdocs.config.defaults import get_schema\r\n File \"/usr/local/lib/python3.9/site-packages/mkdocs/config/defaults.py\", line 1, in <module>\r\n from mkdocs.config import config_options\r\n File \"/usr/local/lib/python3.9/site-packages/mkdocs/config/config_options.py\", line 8, in <module>\r\n from mkdocs import utils, theme, plugins\r\n File \"/usr/local/lib/python3.9/site-packages/mkdocs/theme.py\", line 6, in <module>\r\n from mkdocs.utils import filters\r\n File \"/usr/local/lib/python3.9/site-packages/mkdocs/utils/filters.py\", line 13, in <module>\r\n @jinja2.contextfilter\r\nAttributeError: module 'jinja2' has no attribute 'contextfilter'\r\n```\r\nHowever, if I install jinja2 3.0.0:\r\n```\r\nadmin@host ui % pip install jinja2==3.0.0\r\nCollecting jinja2==3.0.0\r\n Using cached Jinja2-3.0.0-py3-none-any.whl (133 kB)\r\nRequirement already satisfied: MarkupSafe>=2.0.0rc2 in /usr/local/lib/python3.9/site-packages (from jinja2==3.0.0) (2.1.1)\r\nInstalling collected packages: jinja2\r\n Attempting uninstall: jinja2\r\n Found existing installation: Jinja2 3.1.0\r\n Uninstalling Jinja2-3.1.0:\r\n Successfully uninstalled Jinja2-3.1.0\r\nSuccessfully installed jinja2-3.0.0\r\nadmin@host ui % mkdocs build\r\nINFO - Cleaning site directory\r\nINFO - Building documentation to directory: /Users/admin/git/searchlab/ui/site\r\nINFO - Documentation built in 0.33 seconds\r\n```\r\n\r\n- mkdocs can be patched by explicitly installing jinja2 
3.0.0.\r\n- maybe this is not a mkdocs bug, but a jinja2 bug; however, this could be patched in mkdocs as well.\r\n\nPrevent error with Jinja2 v3.1\nFixes #2794\r\n\r\nSee [Jinja2 v3.0.0 changes](https://jinja.palletsprojects.com/en/3.0.x/changes/#version-3-0-0):\r\n\r\n> The function and filter decorators have been renamed and unified. The old names are deprecated...\r\n> \r\n> - `pass_context` replaces `contextfunction` and `contextfilter`.\n", "before_files": [{"content": "import jinja2\n\nfrom mkdocs.utils import normalize_url\n\n\[email protected]\ndef url_filter(context, value):\n \"\"\" A Template filter to normalize URLs. \"\"\"\n return normalize_url(value, page=context['page'], base=context['base_url'])\n", "path": "mkdocs/utils/filters.py"}]} | 1,619 | 143 |
gh_patches_debug_4805 | rasdani/github-patches | git_diff | statsmodels__statsmodels-680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test Baxter King band-pass filter fails with scipy 0.12 beta1
current git head 1b12824f53b1 and 0.4.3, python2.7, tested on ubuntu 13.04 amd64; haven't tried scipy 0.11, but the test seems to work with the 0.10.1 package
```
======================================================================
ERROR: Test Baxter King band-pass filter. Results are taken from Stata
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/statsmodels/statsmodels/tsa/filters/tests/test_filters.py", line 47, in test_bking1d
Y = bkfilter(X, 6, 32, 12)
File "/statsmodels/statsmodels/tsa/filters/bk_filter.py", line 72, in bkfilter
return fftconvolve(bweights, X, mode='valid') # get a centered moving avg/
File "/usr/lib/python2.7/dist-packages/scipy/signal/signaltools.py", line 204, in fftconvolve
_check_valid_mode_shapes(s1, s2)
File "/usr/lib/python2.7/dist-packages/scipy/signal/signaltools.py", line 64, in _check_valid_mode_shapes
"in1 should have at least as many items as in2 in "
ValueError: in1 should have at least as many items as in2 in every dimension for 'valid' mode.
----------------------------------------------------------------------
```
</issue>
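A minimal reproduction sketch of the constraint behind that traceback, assuming `numpy` and a scipy from the 0.12 line are available (array sizes are made up; 25 taps stands in for the 2*K+1 = 25 weights used with K=12):

```python
import numpy as np
from scipy.signal import fftconvolve

X = np.random.randn(200)   # the data series
bweights = np.zeros(25)    # the centered filter weights

# scipy 0.12 enforces that in1 be at least as large as in2 in every
# dimension for mode='valid', so the longer array has to come first:
Y = fftconvolve(X, bweights, mode='valid')   # fine: 200 >= 25
print(Y.shape)                               # (176,) == 200 - 25 + 1

# fftconvolve(bweights, X, mode='valid')     # ValueError on scipy >= 0.12
```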
<code>
[start of statsmodels/tsa/filters/bk_filter.py]
1 import numpy as np
2 from scipy.signal import fftconvolve
3
4 def bkfilter(X, low=6, high=32, K=12):
5 """
6 Baxter-King bandpass filter
7
8 Parameters
9 ----------
10 X : array-like
11 A 1 or 2d ndarray. If 2d, variables are assumed to be in columns.
12 low : float
13 Minimum period for oscillations, ie., Baxter and King suggest that
14 the Burns-Mitchell U.S. business cycle has 6 for quarterly data and
15 1.5 for annual data.
16 high : float
17 Maximum period for oscillations BK suggest that the U.S.
18 business cycle has 32 for quarterly data and 8 for annual data.
19 K : int
20 Lead-lag length of the filter. Baxter and King propose a truncation
21 length of 12 for quarterly data and 3 for annual data.
22
23 Returns
24 -------
25 Y : array
26 Cyclical component of X
27
28 References
29 ---------- ::
30 Baxter, M. and R. G. King. "Measuring Business Cycles: Approximate
31 Band-Pass Filters for Economic Time Series." *Review of Economics and
32 Statistics*, 1999, 81(4), 575-593.
33
34 Notes
35 -----
36 Returns a centered weighted moving average of the original series. Where
37 the weights a[j] are computed ::
38
39 a[j] = b[j] + theta, for j = 0, +/-1, +/-2, ... +/- K
40 b[0] = (omega_2 - omega_1)/pi
41 b[j] = 1/(pi*j)(sin(omega_2*j)-sin(omega_1*j), for j = +/-1, +/-2,...
42
43 and theta is a normalizing constant ::
44
45 theta = -sum(b)/(2K+1)
46
47 Examples
48 --------
49 >>> import statsmodels.api as sm
50 >>> dta = sm.datasets.macrodata.load()
51 >>> X = dta.data['realinv']
52 >>> Y = sm.tsa.filters.bkfilter(X, 6, 24, 12)
53 """
54 #TODO: change the docstring to ..math::?
55 #TODO: allow windowing functions to correct for Gibb's Phenomenon?
56 # adjust bweights (symmetrically) by below before demeaning
57 # Lancosz Sigma Factors np.sinc(2*j/(2.*K+1))
58 if low < 2:
59 raise ValueError("low cannot be less than 2")
60 X = np.asarray(X)
61 omega_1 = 2.*np.pi/high # convert from freq. to periodicity
62 omega_2 = 2.*np.pi/low
63 bweights = np.zeros(2*K+1)
64 bweights[K] = (omega_2 - omega_1)/np.pi # weight at zero freq.
65 j = np.arange(1,int(K)+1)
66 weights = 1/(np.pi*j)*(np.sin(omega_2*j)-np.sin(omega_1*j))
67 bweights[K+j] = weights # j is an idx
68 bweights[:K] = weights[::-1] # make symmetric weights
69 bweights -= bweights.mean() # make sure weights sum to zero
70 if X.ndim == 2:
71 bweights = bweights[:,None]
72 return fftconvolve(bweights, X, mode='valid') # get a centered moving avg/
73 # convolution
74
[end of statsmodels/tsa/filters/bk_filter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/statsmodels/tsa/filters/bk_filter.py b/statsmodels/tsa/filters/bk_filter.py
--- a/statsmodels/tsa/filters/bk_filter.py
+++ b/statsmodels/tsa/filters/bk_filter.py
@@ -69,5 +69,5 @@
bweights -= bweights.mean() # make sure weights sum to zero
if X.ndim == 2:
bweights = bweights[:,None]
- return fftconvolve(bweights, X, mode='valid') # get a centered moving avg/
+ return fftconvolve(X, bweights, mode='valid') # get a centered moving avg/
# convolution
| {"golden_diff": "diff --git a/statsmodels/tsa/filters/bk_filter.py b/statsmodels/tsa/filters/bk_filter.py\n--- a/statsmodels/tsa/filters/bk_filter.py\n+++ b/statsmodels/tsa/filters/bk_filter.py\n@@ -69,5 +69,5 @@\n bweights -= bweights.mean() # make sure weights sum to zero\n if X.ndim == 2:\n bweights = bweights[:,None]\n- return fftconvolve(bweights, X, mode='valid') # get a centered moving avg/\n+ return fftconvolve(X, bweights, mode='valid') # get a centered moving avg/\n # convolution\n", "issue": "Test Baxter King band-pass filter fails with scipy 0.12 beta1\ncurrent git head 1b12824f53b1 and 0.4.3, python2.7 tested on ubuntu 13.04 amd64, haven't tried scipy 0.11 but it the test seems to work with the 0.10.1 package\n\n```\n======================================================================\nERROR: Test Baxter King band-pass filter. Results are taken from Stata\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/usr/lib/python2.7/dist-packages/nose/case.py\", line 197, in runTest\n self.test(*self.arg)\n File \"/statsmodels/statsmodels/tsa/filters/tests/test_filters.py\", line 47, in test_bking1d\n Y = bkfilter(X, 6, 32, 12)\n File \"/statsmodels/statsmodels/tsa/filters/bk_filter.py\", line 72, in bkfilter\n return fftconvolve(bweights, X, mode='valid') # get a centered moving avg/\n File \"/usr/lib/python2.7/dist-packages/scipy/signal/signaltools.py\", line 204, in fftconvolve\n _check_valid_mode_shapes(s1, s2)\n File \"/usr/lib/python2.7/dist-packages/scipy/signal/signaltools.py\", line 64, in _check_valid_mode_shapes\n \"in1 should have at least as many items as in2 in \"\nValueError: in1 should have at least as many items as in2 in every dimension for 'valid' mode.\n\n----------------------------------------------------------------------\n```\n\n", "before_files": [{"content": "import numpy as np\nfrom scipy.signal import fftconvolve\n\ndef bkfilter(X, low=6, high=32, K=12):\n \"\"\"\n Baxter-King bandpass filter\n\n Parameters\n ----------\n X : array-like\n A 1 or 2d ndarray. If 2d, variables are assumed to be in columns.\n low : float\n Minimum period for oscillations, ie., Baxter and King suggest that\n the Burns-Mitchell U.S. business cycle has 6 for quarterly data and\n 1.5 for annual data.\n high : float\n Maximum period for oscillations BK suggest that the U.S.\n business cycle has 32 for quarterly data and 8 for annual data.\n K : int\n Lead-lag length of the filter. Baxter and King propose a truncation\n length of 12 for quarterly data and 3 for annual data.\n\n Returns\n -------\n Y : array\n Cyclical component of X\n\n References\n ---------- ::\n Baxter, M. and R. G. King. \"Measuring Business Cycles: Approximate\n Band-Pass Filters for Economic Time Series.\" *Review of Economics and\n Statistics*, 1999, 81(4), 575-593.\n\n Notes\n -----\n Returns a centered weighted moving average of the original series. Where\n the weights a[j] are computed ::\n\n a[j] = b[j] + theta, for j = 0, +/-1, +/-2, ... 
+/- K\n b[0] = (omega_2 - omega_1)/pi\n b[j] = 1/(pi*j)(sin(omega_2*j)-sin(omega_1*j), for j = +/-1, +/-2,...\n\n and theta is a normalizing constant ::\n\n theta = -sum(b)/(2K+1)\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> dta = sm.datasets.macrodata.load()\n >>> X = dta.data['realinv']\n >>> Y = sm.tsa.filters.bkfilter(X, 6, 24, 12)\n \"\"\"\n#TODO: change the docstring to ..math::?\n#TODO: allow windowing functions to correct for Gibb's Phenomenon?\n# adjust bweights (symmetrically) by below before demeaning\n# Lancosz Sigma Factors np.sinc(2*j/(2.*K+1))\n if low < 2:\n raise ValueError(\"low cannot be less than 2\")\n X = np.asarray(X)\n omega_1 = 2.*np.pi/high # convert from freq. to periodicity\n omega_2 = 2.*np.pi/low\n bweights = np.zeros(2*K+1)\n bweights[K] = (omega_2 - omega_1)/np.pi # weight at zero freq.\n j = np.arange(1,int(K)+1)\n weights = 1/(np.pi*j)*(np.sin(omega_2*j)-np.sin(omega_1*j))\n bweights[K+j] = weights # j is an idx\n bweights[:K] = weights[::-1] # make symmetric weights\n bweights -= bweights.mean() # make sure weights sum to zero\n if X.ndim == 2:\n bweights = bweights[:,None]\n return fftconvolve(bweights, X, mode='valid') # get a centered moving avg/\n # convolution\n", "path": "statsmodels/tsa/filters/bk_filter.py"}]} | 1,807 | 146 |
gh_patches_debug_10840 | rasdani/github-patches | git_diff | nf-core__tools-1263 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Non-updated lint tip message
## Description of the bug
Linting with files applicable for auto-fixing gives the following message:
```
Tip: Some of these linting errors can automatically be resolved with the following command:
nf-core lint . --fix files_unchanged
```
However, the positional `.` argument has been removed in the latest version of nf-core tools, so the command errors:
```
Error: Got unexpected extra argument (.)
```
## Steps to reproduce
Modify a template file (e.g. `.github/CONTRIBUTING.md`, and run lint command
## Expected behaviour
Update message to remove `.`
## System
- nf-core tools version: 2.1
## Additional context
<!-- Add any other context about the problem here. -->
</issue>
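A freestanding sketch of what the corrected tip-message construction could look like (names and spacing are illustrative; the actual fix lives in `nf_core/lint_utils.py` and is shown in the diff further down):

```python
def fix_command(wf_path: str, could_fix: list) -> str:
    # The positional path argument is gone, so a non-default directory
    # has to be passed through --dir instead of appended after `lint`.
    dir_part = "" if wf_path == "." else f"--dir {wf_path} "
    return f"nf-core lint {dir_part}--fix {' --fix '.join(could_fix)}"

print(fix_command(".", ["files_unchanged"]))
# nf-core lint --fix files_unchanged
print(fix_command("my-pipeline", ["files_unchanged", "files_exist"]))
# nf-core lint --dir my-pipeline --fix files_unchanged --fix files_exist
```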
<code>
[start of nf_core/lint_utils.py]
1 import rich
2 from rich.console import Console
3 from rich.table import Table
4 import logging
5
6 import nf_core.utils
7
8 log = logging.getLogger(__name__)
9
10 # Create a console used by all lint tests
11 console = Console(force_terminal=nf_core.utils.rich_force_colors())
12
13
14 def print_joint_summary(lint_obj, module_lint_obj):
15 """Print a joint summary of the general pipe lint tests and the module lint tests"""
16 nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)
17 nbr_ignored = len(lint_obj.ignored)
18 nbr_fixed = len(lint_obj.fixed)
19 nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)
20 nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)
21
22 def _s(some_length):
23 return "" if some_length == 1 else "s"
24
25 summary_colour = "red" if nbr_failed > 0 else "green"
26 table = Table(box=rich.box.ROUNDED, style=summary_colour)
27 table.add_column(f"LINT RESULTS SUMMARY".format(nbr_passed), no_wrap=True)
28 table.add_row(r"[green][✔] {:>3} Test{} Passed".format(nbr_passed, _s(nbr_passed)))
29 if nbr_fixed:
30 table.add_row(r"[bright blue][?] {:>3} Test{} Fixed".format(nbr_fixed, _s(nbr_fixed)))
31 table.add_row(r"[grey58][?] {:>3} Test{} Ignored".format(nbr_ignored, _s(nbr_ignored)))
32 table.add_row(r"[yellow][!] {:>3} Test Warning{}".format(nbr_warned, _s(nbr_warned)))
33 table.add_row(r"[red][✗] {:>3} Test{} Failed".format(nbr_failed, _s(nbr_failed)))
34 console.print(table)
35
36
37 def print_fixes(lint_obj, module_lint_obj):
38 """Prints available and applied fixes"""
39
40 if len(lint_obj.could_fix):
41 fix_cmd = "nf-core lint {} --fix {}".format(lint_obj.wf_path, " --fix ".join(lint_obj.could_fix))
42 console.print(
43 f"\nTip: Some of these linting errors can automatically be resolved with the following command:\n\n[blue] {fix_cmd}\n"
44 )
45 if len(lint_obj.fix):
46 console.print(
47 "Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'."
48 )
49
[end of nf_core/lint_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nf_core/lint_utils.py b/nf_core/lint_utils.py
--- a/nf_core/lint_utils.py
+++ b/nf_core/lint_utils.py
@@ -38,7 +38,9 @@
"""Prints available and applied fixes"""
if len(lint_obj.could_fix):
- fix_cmd = "nf-core lint {} --fix {}".format(lint_obj.wf_path, " --fix ".join(lint_obj.could_fix))
+ fix_cmd = "nf-core lint {}--fix {}".format(
+ "" if lint_obj.wf_path == "." else f"--dir {lint_obj.wf_path}", " --fix ".join(lint_obj.could_fix)
+ )
console.print(
f"\nTip: Some of these linting errors can automatically be resolved with the following command:\n\n[blue] {fix_cmd}\n"
)
| {"golden_diff": "diff --git a/nf_core/lint_utils.py b/nf_core/lint_utils.py\n--- a/nf_core/lint_utils.py\n+++ b/nf_core/lint_utils.py\n@@ -38,7 +38,9 @@\n \"\"\"Prints available and applied fixes\"\"\"\n \n if len(lint_obj.could_fix):\n- fix_cmd = \"nf-core lint {} --fix {}\".format(lint_obj.wf_path, \" --fix \".join(lint_obj.could_fix))\n+ fix_cmd = \"nf-core lint {}--fix {}\".format(\n+ \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n+ )\n console.print(\n f\"\\nTip: Some of these linting errors can automatically be resolved with the following command:\\n\\n[blue] {fix_cmd}\\n\"\n )\n", "issue": "Non-updated lint tip message\n## Description of the bug\r\n\r\nLinting with files applicable for auto-fixing gives the following message:\r\n\r\n```\r\nTip: Some of these linting errors can automatically be resolved with the following command:\r\n\r\n nf-core lint . --fix files_unchanged\r\n```\r\n\r\nHowever the `.` declaration has been removed in the latest version of nf-core tools, and so the command errors\r\n\r\n```\r\nError: Got unexpected extra argument (.)\r\n```\r\n\r\n## Steps to reproduce\r\n\r\nModify a template file (e.g. `.github/CONTRIBUTING.md`, and run lint command\r\n\r\n## Expected behaviour\r\n\r\nUpdate message to remove `.`\r\n\r\n## System\r\n\r\n- nf-core tools version: 2.1\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "import rich\nfrom rich.console import Console\nfrom rich.table import Table\nimport logging\n\nimport nf_core.utils\n\nlog = logging.getLogger(__name__)\n\n# Create a console used by all lint tests\nconsole = Console(force_terminal=nf_core.utils.rich_force_colors())\n\n\ndef print_joint_summary(lint_obj, module_lint_obj):\n \"\"\"Print a joint summary of the general pipe lint tests and the module lint tests\"\"\"\n nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)\n nbr_ignored = len(lint_obj.ignored)\n nbr_fixed = len(lint_obj.fixed)\n nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)\n nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)\n\n def _s(some_length):\n return \"\" if some_length == 1 else \"s\"\n\n summary_colour = \"red\" if nbr_failed > 0 else \"green\"\n table = Table(box=rich.box.ROUNDED, style=summary_colour)\n table.add_column(f\"LINT RESULTS SUMMARY\".format(nbr_passed), no_wrap=True)\n table.add_row(r\"[green][\u2714] {:>3} Test{} Passed\".format(nbr_passed, _s(nbr_passed)))\n if nbr_fixed:\n table.add_row(r\"[bright blue][?] {:>3} Test{} Fixed\".format(nbr_fixed, _s(nbr_fixed)))\n table.add_row(r\"[grey58][?] {:>3} Test{} Ignored\".format(nbr_ignored, _s(nbr_ignored)))\n table.add_row(r\"[yellow][!] {:>3} Test Warning{}\".format(nbr_warned, _s(nbr_warned)))\n table.add_row(r\"[red][\u2717] {:>3} Test{} Failed\".format(nbr_failed, _s(nbr_failed)))\n console.print(table)\n\n\ndef print_fixes(lint_obj, module_lint_obj):\n \"\"\"Prints available and applied fixes\"\"\"\n\n if len(lint_obj.could_fix):\n fix_cmd = \"nf-core lint {} --fix {}\".format(lint_obj.wf_path, \" --fix \".join(lint_obj.could_fix))\n console.print(\n f\"\\nTip: Some of these linting errors can automatically be resolved with the following command:\\n\\n[blue] {fix_cmd}\\n\"\n )\n if len(lint_obj.fix):\n console.print(\n \"Automatic fixes applied. 
Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'.\"\n )\n", "path": "nf_core/lint_utils.py"}]} | 1,347 | 197 |
gh_patches_debug_23895 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2870 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider rentacenter is broken
During the global build at 2021-05-26-14-42-23, spider **rentacenter** failed with **2196 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/rentacenter.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rentacenter.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rentacenter.geojson))
</issue>
<code>
[start of locations/spiders/rentacenter.py]
1 import json
2 import scrapy
3 import re
4 from scrapy.selector import Selector
5
6 from locations.items import GeojsonPointItem
7 from locations.hours import OpeningHours
8
9
10 DAY_MAPPING = {
11 'Monday': 'Mo',
12 'Tuesday': 'Tu',
13 'Wednesday': 'We',
14 'Thursday': 'Th',
15 'Friday': 'Fr',
16 'Saturday': 'Sa',
17 'Sunday': 'Su'
18 }
19
20
21 class RentACenterSpider(scrapy.Spider):
22 name = "rentacenter"
23 item_attributes = { 'brand': "Rent-A-Center" }
24 allowed_domains = ["rentacenter.com"]
25
26 start_urls = [
27 "https://locations.rentacenter.com/sitemap.xml",
28 ]
29
30 def parse_hours(self, hours):
31 opening_hours = OpeningHours()
32
33 for hour in hours:
34 opening_hours.add_range(day=DAY_MAPPING[hour["dayOfWeek"].replace('http://schema.org/', '')],
35 open_time=hour["opens"],
36 close_time=hour["closes"],
37 time_format='%H:%M:%S')
38
39 return opening_hours.as_opening_hours()
40
41 def parse_location(self, response):
42 data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
43 data = json.loads(data)
44
45 ref = data.get("branchCode")
46 if not ref:
47 return # not a store page
48
49 properties = {
50 'addr_full': data["address"]["streetAddress"],
51 'phone': data["telephone"],
52 'city': data["address"]["addressLocality"],
53 'state': data["address"]["addressRegion"],
54 'postcode': data["address"]["postalCode"],
55 'country': 'US',
56 'ref': ref,
57 'website': response.url,
58 'lat': float(data["geo"]["latitude"]),
59 'lon': float(data["geo"]["longitude"]),
60 'name': data["name"]
61 }
62
63 hours = self.parse_hours(data.get("openingHoursSpecification", []))
64 if hours:
65 properties["opening_hours"] = hours
66
67 yield GeojsonPointItem(**properties)
68
69 def parse_state_sitemap(self, response):
70 xml = Selector(response)
71 xml.remove_namespaces()
72
73 urls = xml.xpath('//loc/text()').extract()
74 urls = [url.strip() for url in urls]
75
76 # individual store pages are listed at top, then a state page, then bunch of other non-store pages
77 # find the index position of the state page and then only parse urls before that
78 i = urls.index(re.search(r'^(https://locations.rentacenter.com/.+?)/.*$', urls[0]).groups()[0] + '/')
79 for url in urls[:i]:
80 yield scrapy.Request(url, callback=self.parse_location)
81
82 def parse(self, response):
83 xml = Selector(response)
84 xml.remove_namespaces()
85
86 urls = xml.xpath('//loc/text()').extract()
87 urls = [url.strip() for url in urls]
88
89 for url in urls:
90 yield scrapy.Request(url, callback=self.parse_state_sitemap)
91
92
[end of locations/spiders/rentacenter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/rentacenter.py b/locations/spiders/rentacenter.py
--- a/locations/spiders/rentacenter.py
+++ b/locations/spiders/rentacenter.py
@@ -48,15 +48,15 @@
properties = {
'addr_full': data["address"]["streetAddress"],
- 'phone': data["telephone"],
+ 'phone': data.get("telephone"),
'city': data["address"]["addressLocality"],
'state': data["address"]["addressRegion"],
'postcode': data["address"]["postalCode"],
'country': 'US',
'ref': ref,
'website': response.url,
- 'lat': float(data["geo"]["latitude"]),
- 'lon': float(data["geo"]["longitude"]),
+ 'lat': data["geo"]["latitude"],
+ 'lon': data["geo"]["longitude"],
'name': data["name"]
}
@@ -87,5 +87,7 @@
urls = [url.strip() for url in urls]
for url in urls:
+ if '/home/' in url:
+ continue
yield scrapy.Request(url, callback=self.parse_state_sitemap)
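A small sketch of the two defensive changes the diff above makes, with made-up data (some Rent-A-Center location pages apparently omit `telephone`, and the `/home/...` sitemap entries are not state sitemaps):

```python
data = {"name": "Rent-A-Center", "geo": {"latitude": 33.0, "longitude": -96.7}}

phone = data.get("telephone")   # None instead of a KeyError when the key is absent
lat = data["geo"]["latitude"]   # passed through as-is, with no float() cast

urls = [
    "https://locations.rentacenter.com/texas/plano/",
    "https://locations.rentacenter.com/home/credit/",  # skip: not a state sitemap
]
state_urls = [u for u in urls if "/home/" not in u]
print(phone, lat, state_urls)
```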
| {"golden_diff": "diff --git a/locations/spiders/rentacenter.py b/locations/spiders/rentacenter.py\n--- a/locations/spiders/rentacenter.py\n+++ b/locations/spiders/rentacenter.py\n@@ -48,15 +48,15 @@\n \n properties = {\n 'addr_full': data[\"address\"][\"streetAddress\"],\n- 'phone': data[\"telephone\"],\n+ 'phone': data.get(\"telephone\"),\n 'city': data[\"address\"][\"addressLocality\"],\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n 'country': 'US',\n 'ref': ref,\n 'website': response.url,\n- 'lat': float(data[\"geo\"][\"latitude\"]),\n- 'lon': float(data[\"geo\"][\"longitude\"]),\n+ 'lat': data[\"geo\"][\"latitude\"],\n+ 'lon': data[\"geo\"][\"longitude\"],\n 'name': data[\"name\"]\n }\n \n@@ -87,5 +87,7 @@\n urls = [url.strip() for url in urls]\n \n for url in urls:\n+ if '/home/' in url:\n+ continue\n yield scrapy.Request(url, callback=self.parse_state_sitemap)\n", "issue": "Spider rentacenter is broken\nDuring the global build at 2021-05-26-14-42-23, spider **rentacenter** failed with **2196 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/rentacenter.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rentacenter.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rentacenter.geojson))\n", "before_files": [{"content": "import json\nimport scrapy\nimport re\nfrom scrapy.selector import Selector\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\n\nclass RentACenterSpider(scrapy.Spider):\n name = \"rentacenter\"\n item_attributes = { 'brand': \"Rent-A-Center\" }\n allowed_domains = [\"rentacenter.com\"]\n\n start_urls = [\n \"https://locations.rentacenter.com/sitemap.xml\",\n ]\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n opening_hours.add_range(day=DAY_MAPPING[hour[\"dayOfWeek\"].replace('http://schema.org/', '')],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n time_format='%H:%M:%S')\n\n return opening_hours.as_opening_hours()\n\n def parse_location(self, response):\n data = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n data = json.loads(data)\n\n ref = data.get(\"branchCode\")\n if not ref:\n return # not a store page\n\n properties = {\n 'addr_full': data[\"address\"][\"streetAddress\"],\n 'phone': data[\"telephone\"],\n 'city': data[\"address\"][\"addressLocality\"],\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n 'country': 'US',\n 'ref': ref,\n 'website': response.url,\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'name': data[\"name\"]\n }\n\n hours = self.parse_hours(data.get(\"openingHoursSpecification\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_state_sitemap(self, response):\n xml = Selector(response)\n xml.remove_namespaces()\n\n urls = xml.xpath('//loc/text()').extract()\n urls = [url.strip() for url in urls]\n\n # individual store pages are listed at top, then a state page, then bunch of other non-store pages\n # find the index position of the state page and then only parse urls before that\n i 
= urls.index(re.search(r'^(https://locations.rentacenter.com/.+?)/.*$', urls[0]).groups()[0] + '/')\n for url in urls[:i]:\n yield scrapy.Request(url, callback=self.parse_location)\n\n def parse(self, response):\n xml = Selector(response)\n xml.remove_namespaces()\n\n urls = xml.xpath('//loc/text()').extract()\n urls = [url.strip() for url in urls]\n\n for url in urls:\n yield scrapy.Request(url, callback=self.parse_state_sitemap)\n\n", "path": "locations/spiders/rentacenter.py"}]} | 1,568 | 267 |
gh_patches_debug_64320 | rasdani/github-patches | git_diff | pex-tool__pex-1442 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.48
On the docket:
+ [x] Remove zipapp execution mode & introduce --layout. #1438
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.47"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.47"
+__version__ = "2.1.48"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.47\"\n+__version__ = \"2.1.48\"\n", "issue": "Release 2.1.48\nOn the docket:\r\n+ [x] Remove zipapp execution mode & introduce --layout. #1438 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.47\"\n", "path": "pex/version.py"}]} | 618 | 97 |
gh_patches_debug_34315 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-3540 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Revert component-based scripts lookup
### Is your feature request related to a problem? Please describe
In https://github.com/opensearch-project/opensearch-build/pull/2934 we have added a way to include component build scripts. There are several problems with that.
1. In https://github.com/opensearch-project/opensearch-build/issues/2188 we agreed to split monorepos. The maps monorepo came in as another new kind of monorepo that should not be supported.
2. There are no tests in the PR.
3. The new feature of path lookup is not documented in https://github.com/opensearch-project/opensearch-build/blob/cbe6841a3c459b65d5fb17b713994e5c01d7ee8e/src/paths/script_finder.py#L27
### Describe the solution you'd like
1. Add tests and documentation on top of https://github.com/opensearch-project/opensearch-build/pull/2934.
2. Split the maps monorepo, https://github.com/opensearch-project/opensearch-build/issues/2188
3. Revert https://github.com/opensearch-project/opensearch-build/pull/2934
### Describe alternatives you've considered
_No response_
### Additional context
_No response_
</issue>
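For reference, the lookup mechanism at issue reduces to a first-existing-path scan; a minimal sketch with illustrative paths (mirroring `__find_script` in the code below, once the per-component repository locations are reverted out):

```python
import os
from typing import List

def find_first_existing(name: str, candidates: List[str]) -> str:
    # Walk an ordered list of candidate locations; return the first on disk.
    for path in candidates:
        if os.path.exists(path):
            return path
    raise FileNotFoundError(f"Could not find {name} script. Looked in {candidates}.")

# Post-revert order for integtest.sh: component override, repo root,
# repo scripts/ dir, then the bundled default (paths are made up).
candidates = [
    "scripts/components/OpenSearch/integtest.sh",
    "/tmp/checkout/integtest.sh",
    "/tmp/checkout/scripts/integtest.sh",
    "scripts/default/integtest.sh",
]
# find_first_existing("integtest.sh", candidates)
```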
<code>
[start of src/paths/script_finder.py]
1 # Copyright OpenSearch Contributors
2 # SPDX-License-Identifier: Apache-2.0
3 #
4 # The OpenSearch Contributors require contributions made to
5 # this file be licensed under the Apache-2.0 license or a
6 # compatible open source license.
7
8 import os
9 from typing import Callable, List
10
11
12 class ScriptFinder:
13 class ScriptNotFoundError(Exception):
14 def __init__(self, kind: str, paths: List[str]) -> None:
15 self.kind = kind
16 self.paths = paths
17 super().__init__(f"Could not find {kind} script. Looked in {paths}.")
18
19 component_scripts_path = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.join("..", "..", "scripts", "components")))
20
21 default_scripts_path = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.join("..", "..", "scripts", "default")))
22
23 """
24 ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.
25
26 For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,
27 it will look in the following locations, in order:
28 * <component_scripts_path>/<component_name>/<script-name>
29 * /<component_name>/<script-name> in the component's Git repository
30 * root of the component's Git repository
31 * /scripts/<script-name> in the component's Git repository
32 * <default_scripts_path>/<script-name>
33
34 For install.sh scripts, given a component name, it will look in the following locations, in order:
35 * <component_scripts_path>/<component_name>/<script-name>
36 * <default_scripts_path>/<script-name>
37 """
38
39 @classmethod
40 def __find_script(cls, name: str, paths: List[str]) -> str:
41 exists: Callable[[str], bool] = lambda path: os.path.exists(path)
42 script = next(filter(exists, paths), None)
43 if script is None:
44 raise ScriptFinder.ScriptNotFoundError(name, paths)
45
46 return script
47
48 @classmethod
49 def __find_named_script(cls, script_name: str, component_name: str, git_dir: str) -> str:
50 paths = [
51 os.path.realpath(os.path.join(cls.component_scripts_path, component_name, script_name)),
52 os.path.realpath(os.path.join(git_dir, component_name, script_name)),
53 os.path.realpath(os.path.join(git_dir, script_name)),
54 os.path.realpath(os.path.join(git_dir, "scripts", script_name)),
55 os.path.realpath(os.path.join(cls.default_scripts_path, script_name)),
56 ]
57
58 return cls.__find_script(script_name, paths)
59
60 @classmethod
61 def find_build_script(cls, project: str, component_name: str, git_dir: str) -> str:
62 paths = [
63 os.path.realpath(os.path.join(cls.component_scripts_path, component_name, "build.sh")),
64 os.path.realpath(os.path.join(git_dir, component_name, "build.sh")),
65 os.path.realpath(os.path.join(git_dir, "build.sh")),
66 os.path.realpath(os.path.join(git_dir, "scripts", "build.sh")),
67 os.path.realpath(
68 os.path.join(
69 cls.default_scripts_path,
70 project.replace(" ", "-").lower(),
71 "build.sh",
72 )
73 ),
74 ]
75
76 return cls.__find_script("build.sh", paths)
77
78 @classmethod
79 def find_install_script(cls, component_name: str) -> str:
80 paths = [
81 os.path.realpath(os.path.join(cls.component_scripts_path, component_name, "install.sh")),
82 os.path.realpath(os.path.join(cls.default_scripts_path, "install.sh")),
83 ]
84
85 return cls.__find_script("install.sh", paths)
86
87 @classmethod
88 def find_integ_test_script(cls, component_name: str, git_dir: str) -> str:
89 return cls.__find_named_script("integtest.sh", component_name, git_dir)
90
91 @classmethod
92 def find_bwc_test_script(cls, component_name: str, git_dir: str) -> str:
93 script_name = "bwctest.sh"
94 paths = [
95 os.path.realpath(os.path.join(cls.default_scripts_path, script_name)),
96 os.path.realpath(os.path.join(cls.component_scripts_path, component_name, script_name)),
97 os.path.realpath(os.path.join(git_dir, component_name, script_name)),
98 os.path.realpath(os.path.join(git_dir, script_name)),
99 os.path.realpath(os.path.join(git_dir, "scripts", script_name)),
100 ]
101
102 return cls.__find_script(script_name, paths)
103
[end of src/paths/script_finder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paths/script_finder.py b/src/paths/script_finder.py
--- a/src/paths/script_finder.py
+++ b/src/paths/script_finder.py
@@ -26,7 +26,6 @@
For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,
it will look in the following locations, in order:
* <component_scripts_path>/<component_name>/<script-name>
- * /<component_name>/<script-name> in the component's Git repository
* root of the component's Git repository
* /scripts/<script-name> in the component's Git repository
* <default_scripts_path>/<script-name>
@@ -49,7 +48,6 @@
def __find_named_script(cls, script_name: str, component_name: str, git_dir: str) -> str:
paths = [
os.path.realpath(os.path.join(cls.component_scripts_path, component_name, script_name)),
- os.path.realpath(os.path.join(git_dir, component_name, script_name)),
os.path.realpath(os.path.join(git_dir, script_name)),
os.path.realpath(os.path.join(git_dir, "scripts", script_name)),
os.path.realpath(os.path.join(cls.default_scripts_path, script_name)),
@@ -61,7 +59,6 @@
def find_build_script(cls, project: str, component_name: str, git_dir: str) -> str:
paths = [
os.path.realpath(os.path.join(cls.component_scripts_path, component_name, "build.sh")),
- os.path.realpath(os.path.join(git_dir, component_name, "build.sh")),
os.path.realpath(os.path.join(git_dir, "build.sh")),
os.path.realpath(os.path.join(git_dir, "scripts", "build.sh")),
os.path.realpath(
| {"golden_diff": "diff --git a/src/paths/script_finder.py b/src/paths/script_finder.py\n--- a/src/paths/script_finder.py\n+++ b/src/paths/script_finder.py\n@@ -26,7 +26,6 @@\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n- * /<component_name>/<script-name> in the component's Git repository\n * root of the component's Git repository\n * /scripts/<script-name> in the component's Git repository\n * <default_scripts_path>/<script-name>\n@@ -49,7 +48,6 @@\n def __find_named_script(cls, script_name: str, component_name: str, git_dir: str) -> str:\n paths = [\n os.path.realpath(os.path.join(cls.component_scripts_path, component_name, script_name)),\n- os.path.realpath(os.path.join(git_dir, component_name, script_name)),\n os.path.realpath(os.path.join(git_dir, script_name)),\n os.path.realpath(os.path.join(git_dir, \"scripts\", script_name)),\n os.path.realpath(os.path.join(cls.default_scripts_path, script_name)),\n@@ -61,7 +59,6 @@\n def find_build_script(cls, project: str, component_name: str, git_dir: str) -> str:\n paths = [\n os.path.realpath(os.path.join(cls.component_scripts_path, component_name, \"build.sh\")),\n- os.path.realpath(os.path.join(git_dir, component_name, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts\", \"build.sh\")),\n os.path.realpath(\n", "issue": "Revert component-based scripts lookup\n### Is your feature request related to a problem? Please describe\n\nIn https://github.com/opensearch-project/opensearch-build/pull/2934 we have added a way to include component build scripts. There are several problems with that.\r\n\r\n1. In https://github.com/opensearch-project/opensearch-build/issues/2188 we agreed to split monorepos. The maps monorepo came in as another new kind of monorepo that should not be supported. \r\n2. There are no tests in the PR.\r\n3. The new feature of path lookup is not documented in https://github.com/opensearch-project/opensearch-build/blob/cbe6841a3c459b65d5fb17b713994e5c01d7ee8e/src/paths/script_finder.py#L27\n\n### Describe the solution you'd like\n\n1. Add tests and documentation on top of https://github.com/opensearch-project/opensearch-build/pull/2934.\r\n1. Split the maps monorepo, https://github.com/opensearch-project/opensearch-build/issues/2188\r\n1. Revert https://github.com/opensearch-project/opensearch-build/pull/2934\r\n\r\n\n\n### Describe alternatives you've considered\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nfrom typing import Callable, List\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind: str, paths: List[str]) -> None:\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. 
Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.join(\"..\", \"..\", \"scripts\", \"components\")))\n\n default_scripts_path = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.join(\"..\", \"..\", \"scripts\", \"default\")))\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * /<component_name>/<script-name> in the component's Git repository\n * root of the component's Git repository\n * /scripts/<script-name> in the component's Git repository\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name: str, paths: List[str]) -> str:\n exists: Callable[[str], bool] = lambda path: os.path.exists(path)\n script = next(filter(exists, paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n\n return script\n\n @classmethod\n def __find_named_script(cls, script_name: str, component_name: str, git_dir: str) -> str:\n paths = [\n os.path.realpath(os.path.join(cls.component_scripts_path, component_name, script_name)),\n os.path.realpath(os.path.join(git_dir, component_name, script_name)),\n os.path.realpath(os.path.join(git_dir, script_name)),\n os.path.realpath(os.path.join(git_dir, \"scripts\", script_name)),\n os.path.realpath(os.path.join(cls.default_scripts_path, script_name)),\n ]\n\n return cls.__find_script(script_name, paths)\n\n @classmethod\n def find_build_script(cls, project: str, component_name: str, git_dir: str) -> str:\n paths = [\n os.path.realpath(os.path.join(cls.component_scripts_path, component_name, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, component_name, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts\", \"build.sh\")),\n os.path.realpath(\n os.path.join(\n cls.default_scripts_path,\n project.replace(\" \", \"-\").lower(),\n \"build.sh\",\n )\n ),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name: str) -> str:\n paths = [\n os.path.realpath(os.path.join(cls.component_scripts_path, component_name, \"install.sh\")),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name: str, git_dir: str) -> str:\n return cls.__find_named_script(\"integtest.sh\", component_name, git_dir)\n\n @classmethod\n def find_bwc_test_script(cls, component_name: str, git_dir: str) -> str:\n script_name = \"bwctest.sh\"\n paths = [\n os.path.realpath(os.path.join(cls.default_scripts_path, script_name)),\n os.path.realpath(os.path.join(cls.component_scripts_path, component_name, script_name)),\n os.path.realpath(os.path.join(git_dir, component_name, script_name)),\n os.path.realpath(os.path.join(git_dir, script_name)),\n os.path.realpath(os.path.join(git_dir, \"scripts\", script_name)),\n ]\n\n return cls.__find_script(script_name, 
paths)\n", "path": "src/paths/script_finder.py"}]} | 2,021 | 388 |
gh_patches_debug_31801 | rasdani/github-patches | git_diff | espnet__espnet-1002 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The scoring script is incorrect if any pruning is done on the dictionary or in the case of word models
Hi @sw005320 and other developers,
The command `json2trn.py ${dir}/data.json ${dic} --num-spkrs ${num_spkrs} --refs ${dir}/ref.trn --hyps ${dir}/hyp.trn` in `score_sclite.sh` replaces all units in the reference that are not in the dictionary with `unk`, and then scores the result.
This is wrong because you are modifying the reference transcription to match your dictionary. Let's say I train a model with dictionary with only 1 term "Mr" and the hypothesis is
"Mr unk unk" and reference is "Mr John Doe". This would give a WER of 0% because the reference would be modified to "Mr unk unk".
</issue>
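A toy worked example of the artifact described above, using a deliberately tiny dictionary (the ids are made up; the real `char_list` is built from the dict file plus `<blank>`/`<eos>`):

```python
char_list = ["<blank>", "Mr", "<unk>", "<eos>"]  # pruned dict: only "Mr" survives

hyp_ids = "1 2 2"           # recognizer output: "Mr <unk> <unk>"
ref_ids = "1 2 2"           # 'tokenid' field: OOV words already collapsed to <unk>
ref_token = "Mr John Doe"   # 'token' field: the true reference words

hyp = " ".join(char_list[int(i)] for i in hyp_ids.split())
ref_from_ids = " ".join(char_list[int(i)] for i in ref_ids.split())

print(hyp == ref_from_ids)  # True  -> scores as WER 0%, hiding two errors
print(hyp == ref_token)     # False -> scoring against 'token' keeps the errors
```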
<code>
[start of utils/json2trn.py]
1 #!/usr/bin/env python
2 # encoding: utf-8
3
4 # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
5 # 2018 Xuankai Chang (Shanghai Jiao Tong University)
6 # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
7
8 import argparse
9 import codecs
10 import json
11 import logging
12 import sys
13
14 from espnet.utils.cli_utils import get_commandline_args
15
16
17 def get_parser():
18 parser = argparse.ArgumentParser(
19 description='convert a json to a transcription file with a token dictionary',
20 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
21 parser.add_argument('json', type=str, help='json files')
22 parser.add_argument('dict', type=str, help='dict')
23 parser.add_argument('--num-spkrs', type=int, default=1, help='number of speakers')
24 parser.add_argument('--refs', type=str, nargs='+', help='ref for all speakers')
25 parser.add_argument('--hyps', type=str, nargs='+', help='hyp for all outputs')
26 return parser
27
28
29 def main(args):
30 args = get_parser().parse_args(args)
31 convert(args.json, args.dict, args.refs, args.hyps, args.num_spkrs)
32
33
34 def convert(jsonf, dic, refs, hyps, num_spkrs=1):
35 n_ref = len(refs)
36 n_hyp = len(hyps)
37 assert n_ref == n_hyp
38 assert n_ref == num_spkrs
39
40 # logging info
41 logfmt = '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'
42 logging.basicConfig(level=logging.INFO, format=logfmt)
43 logging.info(get_commandline_args())
44
45 logging.info("reading %s", jsonf)
46 with codecs.open(jsonf, 'r', encoding="utf-8") as f:
47 j = json.load(f)
48
49 logging.info("reading %s", dic)
50 with codecs.open(dic, 'r', encoding="utf-8") as f:
51 dictionary = f.readlines()
52 char_list = [entry.split(' ')[0] for entry in dictionary]
53 char_list.insert(0, '<blank>')
54 char_list.append('<eos>')
55
56 for ns in range(num_spkrs):
57 hyp_file = codecs.open(hyps[ns], 'w', encoding="utf-8")
58 ref_file = codecs.open(refs[ns], 'w', encoding="utf-8")
59
60 for x in j['utts']:
61 # hyps
62 if num_spkrs == 1:
63 seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['rec_tokenid'].split()]
64 else:
65 seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['rec_tokenid'].split()]
66 hyp_file.write(" ".join(seq).replace('<eos>', '')),
67 hyp_file.write(" (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
68
69 # ref
70 if num_spkrs == 1:
71 seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['tokenid'].split()]
72 else:
73 seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['tokenid'].split()]
74 ref_file.write(" ".join(seq).replace('<eos>', '')),
75 ref_file.write(" (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
76
77 hyp_file.close()
78 ref_file.close()
79
80
81 if __name__ == '__main__':
82 main(sys.argv[1:])
83
[end of utils/json2trn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/json2trn.py b/utils/json2trn.py
--- a/utils/json2trn.py
+++ b/utils/json2trn.py
@@ -58,21 +58,25 @@
ref_file = codecs.open(refs[ns], 'w', encoding="utf-8")
for x in j['utts']:
- # hyps
+ # recognition hypothesis
if num_spkrs == 1:
seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['rec_tokenid'].split()]
else:
seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['rec_tokenid'].split()]
+ # In the recognition hypothesis, the <eos> symbol is usually attached in the last part of the sentence
+ # and it is removed below.
hyp_file.write(" ".join(seq).replace('<eos>', '')),
hyp_file.write(" (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
- # ref
+ # reference
if num_spkrs == 1:
- seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['tokenid'].split()]
+ seq = j['utts'][x]['output'][0]['token']
else:
- seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['tokenid'].split()]
- ref_file.write(" ".join(seq).replace('<eos>', '')),
- ref_file.write(" (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
+ seq = j['utts'][x]['output'][ns][0]['token']
+ # Unlike the recognition hypothesis, the reference is directly generated from a token without dictionary
+ # to avoid to include <unk> symbols in the reference to make scoring normal.
+ # The detailed discussion can be found at https://github.com/espnet/espnet/issues/993
+ ref_file.write(seq + " (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
hyp_file.close()
ref_file.close()
| {"golden_diff": "diff --git a/utils/json2trn.py b/utils/json2trn.py\n--- a/utils/json2trn.py\n+++ b/utils/json2trn.py\n@@ -58,21 +58,25 @@\n ref_file = codecs.open(refs[ns], 'w', encoding=\"utf-8\")\n \n for x in j['utts']:\n- # hyps\n+ # recognition hypothesis\n if num_spkrs == 1:\n seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['rec_tokenid'].split()]\n else:\n seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['rec_tokenid'].split()]\n+ # In the recognition hypothesis, the <eos> symbol is usually attached in the last part of the sentence\n+ # and it is removed below.\n hyp_file.write(\" \".join(seq).replace('<eos>', '')),\n hyp_file.write(\" (\" + j['utts'][x]['utt2spk'].replace('-', '_') + \"-\" + x + \")\\n\")\n \n- # ref\n+ # reference\n if num_spkrs == 1:\n- seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['tokenid'].split()]\n+ seq = j['utts'][x]['output'][0]['token']\n else:\n- seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['tokenid'].split()]\n- ref_file.write(\" \".join(seq).replace('<eos>', '')),\n- ref_file.write(\" (\" + j['utts'][x]['utt2spk'].replace('-', '_') + \"-\" + x + \")\\n\")\n+ seq = j['utts'][x]['output'][ns][0]['token']\n+ # Unlike the recognition hypothesis, the reference is directly generated from a token without dictionary\n+ # to avoid to include <unk> symbols in the reference to make scoring normal.\n+ # The detailed discussion can be found at https://github.com/espnet/espnet/issues/993\n+ ref_file.write(seq + \" (\" + j['utts'][x]['utt2spk'].replace('-', '_') + \"-\" + x + \")\\n\")\n \n hyp_file.close()\n ref_file.close()\n", "issue": "The scoring script is incorrect if any pruning is done on the dictionary or in the case of word models\nHi @sw005320 and other developers,\r\n\r\nThe command `json2trn.py ${dir}/data.json ${dic} --num-spkrs ${num_spkrs} --refs ${dir}/ref.trn --hyps ${dir}/hyp.trn` in the `score_sclite.sh` replaces all units in reference that are not in the dictionary with unk and then scores it.\r\n\r\nThis is wrong because you are modifying the reference transcription to match your dictionary. Let's say I train a model with dictionary with only 1 term \"Mr\" and the hypothesis is \r\n\"Mr unk unk\" and reference is \"Mr John Doe\". This would give a WER of 0% because the reference would be modified to \"Mr unk unk\". 
\n", "before_files": [{"content": "#!/usr/bin/env python\n# encoding: utf-8\n\n# Copyright 2017 Johns Hopkins University (Shinji Watanabe)\n# 2018 Xuankai Chang (Shanghai Jiao Tong University)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nimport codecs\nimport json\nimport logging\nimport sys\n\nfrom espnet.utils.cli_utils import get_commandline_args\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='convert a json to a transcription file with a token dictionary',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('json', type=str, help='json files')\n parser.add_argument('dict', type=str, help='dict')\n parser.add_argument('--num-spkrs', type=int, default=1, help='number of speakers')\n parser.add_argument('--refs', type=str, nargs='+', help='ref for all speakers')\n parser.add_argument('--hyps', type=str, nargs='+', help='hyp for all outputs')\n return parser\n\n\ndef main(args):\n args = get_parser().parse_args(args)\n convert(args.json, args.dict, args.refs, args.hyps, args.num_spkrs)\n\n\ndef convert(jsonf, dic, refs, hyps, num_spkrs=1):\n n_ref = len(refs)\n n_hyp = len(hyps)\n assert n_ref == n_hyp\n assert n_ref == num_spkrs\n\n # logging info\n logfmt = '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'\n logging.basicConfig(level=logging.INFO, format=logfmt)\n logging.info(get_commandline_args())\n\n logging.info(\"reading %s\", jsonf)\n with codecs.open(jsonf, 'r', encoding=\"utf-8\") as f:\n j = json.load(f)\n\n logging.info(\"reading %s\", dic)\n with codecs.open(dic, 'r', encoding=\"utf-8\") as f:\n dictionary = f.readlines()\n char_list = [entry.split(' ')[0] for entry in dictionary]\n char_list.insert(0, '<blank>')\n char_list.append('<eos>')\n\n for ns in range(num_spkrs):\n hyp_file = codecs.open(hyps[ns], 'w', encoding=\"utf-8\")\n ref_file = codecs.open(refs[ns], 'w', encoding=\"utf-8\")\n\n for x in j['utts']:\n # hyps\n if num_spkrs == 1:\n seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['rec_tokenid'].split()]\n else:\n seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['rec_tokenid'].split()]\n hyp_file.write(\" \".join(seq).replace('<eos>', '')),\n hyp_file.write(\" (\" + j['utts'][x]['utt2spk'].replace('-', '_') + \"-\" + x + \")\\n\")\n\n # ref\n if num_spkrs == 1:\n seq = [char_list[int(i)] for i in j['utts'][x]['output'][0]['tokenid'].split()]\n else:\n seq = [char_list[int(i)] for i in j['utts'][x]['output'][ns][0]['tokenid'].split()]\n ref_file.write(\" \".join(seq).replace('<eos>', '')),\n ref_file.write(\" (\" + j['utts'][x]['utt2spk'].replace('-', '_') + \"-\" + x + \")\\n\")\n\n hyp_file.close()\n ref_file.close()\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "path": "utils/json2trn.py"}]} | 1,698 | 525 |
gh_patches_debug_952 | rasdani/github-patches | git_diff | tobymao__sqlglot-2170 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Specifying 3.7+ in setup.py
sqlglot fails with Python version < 3.7.
While Python 3.6 is end-of-life, there are systems that still use 3.6 as they transition to 3.7. Having either `setup.py` or `setup.cfg` specify 3.7+, or adding a note to the README, would be helpful. 
```
import sqlglot
```
`SyntaxError: future feature annotations is not defined`
**Official Documentation**
https://docs.python.org/3/library/__future__.html
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3 setup(
4 name="sqlglot",
5 description="An easily customizable SQL parser and transpiler",
6 long_description=open("README.md").read(),
7 long_description_content_type="text/markdown",
8 url="https://github.com/tobymao/sqlglot",
9 author="Toby Mao",
10 author_email="[email protected]",
11 license="MIT",
12 packages=find_packages(include=["sqlglot", "sqlglot.*"]),
13 package_data={"sqlglot": ["py.typed"]},
14 use_scm_version={
15 "write_to": "sqlglot/_version.py",
16 "fallback_version": "0.0.0",
17 "local_scheme": "no-local-version",
18 },
19 setup_requires=["setuptools_scm"],
20 extras_require={
21 "dev": [
22 "autoflake",
23 "black",
24 "duckdb>=0.6",
25 "isort",
26 "mypy>=0.990",
27 "pandas",
28 "pyspark",
29 "python-dateutil",
30 "pdoc",
31 "pre-commit",
32 ],
33 },
34 classifiers=[
35 "Development Status :: 5 - Production/Stable",
36 "Intended Audience :: Developers",
37 "Intended Audience :: Science/Research",
38 "License :: OSI Approved :: MIT License",
39 "Operating System :: OS Independent",
40 "Programming Language :: SQL",
41 "Programming Language :: Python :: 3 :: Only",
42 ],
43 )
44
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,7 @@
"local_scheme": "no-local-version",
},
setup_requires=["setuptools_scm"],
+ python_requires=">=3.7",
extras_require={
"dev": [
"autoflake",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -17,6 +17,7 @@\n \"local_scheme\": \"no-local-version\",\n },\n setup_requires=[\"setuptools_scm\"],\n+ python_requires=\">=3.7\",\n extras_require={\n \"dev\": [\n \"autoflake\",\n", "issue": "Specifying 3.7+ in setup.py\nsqlglot fails with Python version < 3.7. \r\n\r\nWhile Python 3.6 is end-of-life, there are systems that still use 3.6 as they transition to 3.7. Having either `setup.py` or `setup.cfg` specify 3.7+ or adding to README would be helpful. \r\n\r\n```\r\nimport sqlglot\r\n```\r\n`SyntaxError: future feature annotations is not defined`\r\n\r\n**Official Documentation**\r\nhttps://docs.python.org/3/library/__future__.html\r\n\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nsetup(\n name=\"sqlglot\",\n description=\"An easily customizable SQL parser and transpiler\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/tobymao/sqlglot\",\n author=\"Toby Mao\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=find_packages(include=[\"sqlglot\", \"sqlglot.*\"]),\n package_data={\"sqlglot\": [\"py.typed\"]},\n use_scm_version={\n \"write_to\": \"sqlglot/_version.py\",\n \"fallback_version\": \"0.0.0\",\n \"local_scheme\": \"no-local-version\",\n },\n setup_requires=[\"setuptools_scm\"],\n extras_require={\n \"dev\": [\n \"autoflake\",\n \"black\",\n \"duckdb>=0.6\",\n \"isort\",\n \"mypy>=0.990\",\n \"pandas\",\n \"pyspark\",\n \"python-dateutil\",\n \"pdoc\",\n \"pre-commit\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: SQL\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n)\n", "path": "setup.py"}]} | 1,047 | 79 |
gh_patches_debug_15529 | rasdani/github-patches | git_diff | pypa__virtualenv-1205 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
broken Python 3.3 support due to wheel
With #1176, Python 3.3 support is broken because wheel 0.31 dropped support.
This means that virtualenv 16.0.0 will install fine on a Python 3.3 config, but then fail to create any virtualenvs.
virtualenv should also declare that it doesn't support 3.3.
</issue>
<code>
[start of setup.py]
1 import os
2 import re
3 import shutil
4 import sys
5
6 if sys.version_info[:2] < (2, 7):
7 sys.exit('virtualenv requires Python 2.7 or higher.')
8
9 try:
10 from setuptools import setup
11 from setuptools.command.test import test as TestCommand
12
13 class PyTest(TestCommand):
14 user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
15
16 def initialize_options(self):
17 TestCommand.initialize_options(self)
18 self.pytest_args = []
19
20 def finalize_options(self):
21 TestCommand.finalize_options(self)
22 #self.test_args = []
23 #self.test_suite = True
24
25 def run_tests(self):
26 # import here, because outside the eggs aren't loaded
27 import pytest
28 sys.exit(pytest.main(self.pytest_args))
29
30 setup_params = {
31 'entry_points': {
32 'console_scripts': ['virtualenv=virtualenv:main'],
33 },
34 'zip_safe': False,
35 'cmdclass': {'test': PyTest},
36 'tests_require': ['pytest', 'mock'],
37 }
38 except ImportError:
39 from distutils.core import setup
40 if sys.platform == 'win32':
41 print('Note: without Setuptools installed you will '
42 'have to use "python -m virtualenv ENV"')
43 setup_params = {}
44 else:
45 script = 'scripts/virtualenv'
46 setup_params = {'scripts': [script]}
47
48
49 def read_file(*paths):
50 here = os.path.dirname(os.path.abspath(__file__))
51 with open(os.path.join(here, *paths)) as f:
52 return f.read()
53
54 # Get long_description from index.rst:
55 long_description = read_file('docs', 'index.rst')
56 long_description = long_description.strip().split('split here', 1)[0]
57 # Add release history
58 changes = read_file('docs', 'changes.rst')
59 # Only report last two releases for brevity
60 releases_found = 0
61 change_lines = []
62 for line in changes.splitlines():
63 change_lines.append(line)
64 if line.startswith('--------------'):
65 releases_found += 1
66 if releases_found > 2:
67 break
68
69 changes = '\n'.join(change_lines[:-2]) + '\n'
70 changes += '`Full Changelog <https://virtualenv.pypa.io/en/latest/changes.html>`_.'
71 # Replace issue/pull directives
72 changes = re.sub(r':pull:`(\d+)`', r'PR #\1', changes)
73 changes = re.sub(r':issue:`(\d+)`', r'#\1', changes)
74
75 long_description += '\n\n' + changes
76
77
78 def get_version():
79 version_file = read_file('virtualenv.py')
80 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
81 version_file, re.M)
82 if version_match:
83 return version_match.group(1)
84 raise RuntimeError("Unable to find version string.")
85
86
87 # Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
88 # exit of python setup.py test # in multiprocessing/util.py _exit_function when
89 # running python setup.py test (see
90 # http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
91 try:
92 import multiprocessing # noqa
93 except ImportError:
94 pass
95
96 setup(
97 name='virtualenv',
98 version=get_version(),
99 description="Virtual Python Environment builder",
100 long_description=long_description,
101 classifiers=[
102 'Development Status :: 5 - Production/Stable',
103 'Intended Audience :: Developers',
104 'License :: OSI Approved :: MIT License',
105 'Programming Language :: Python :: 2',
106 'Programming Language :: Python :: 2.7',
107 'Programming Language :: Python :: 3',
108 'Programming Language :: Python :: 3.4',
109 'Programming Language :: Python :: 3.5',
110 'Programming Language :: Python :: 3.6',
111 ],
112 keywords='setuptools deployment installation distutils',
113 author='Ian Bicking',
114 author_email='[email protected]',
115 maintainer='Jannis Leidel, Carl Meyer and Brian Rosner',
116 maintainer_email='[email protected]',
117 url='https://virtualenv.pypa.io/',
118 license='MIT',
119 py_modules=['virtualenv'],
120 packages=['virtualenv_support'],
121 package_data={'virtualenv_support': ['*.whl']},
122 python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*',
123 **setup_params)
124
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -108,6 +108,7 @@
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
],
keywords='setuptools deployment installation distutils',
author='Ian Bicking',
@@ -119,5 +120,5 @@
py_modules=['virtualenv'],
packages=['virtualenv_support'],
package_data={'virtualenv_support': ['*.whl']},
- python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*',
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
**setup_params)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -108,6 +108,7 @@\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n ],\n keywords='setuptools deployment installation distutils',\n author='Ian Bicking',\n@@ -119,5 +120,5 @@\n py_modules=['virtualenv'],\n packages=['virtualenv_support'],\n package_data={'virtualenv_support': ['*.whl']},\n- python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*',\n+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n **setup_params)\n", "issue": "broken Python 3.3 support due to wheel\nWith #1176, Python 3.3 support is broken because wheel 0.31 dropped support.\r\n\r\nThis means that virtualenv 16.0.0 will install fine on a Python 3.3 config, but then fail to create any virtualenvs.\r\n\r\nvirtualenv should also declare that it doesn't support 3.3\n", "before_files": [{"content": "import os\nimport re\nimport shutil\nimport sys\n\nif sys.version_info[:2] < (2, 7):\n sys.exit('virtualenv requires Python 2.7 or higher.')\n\ntry:\n from setuptools import setup\n from setuptools.command.test import test as TestCommand\n\n class PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n #self.test_args = []\n #self.test_suite = True\n\n def run_tests(self):\n # import here, because outside the eggs aren't loaded\n import pytest\n sys.exit(pytest.main(self.pytest_args))\n\n setup_params = {\n 'entry_points': {\n 'console_scripts': ['virtualenv=virtualenv:main'],\n },\n 'zip_safe': False,\n 'cmdclass': {'test': PyTest},\n 'tests_require': ['pytest', 'mock'],\n }\nexcept ImportError:\n from distutils.core import setup\n if sys.platform == 'win32':\n print('Note: without Setuptools installed you will '\n 'have to use \"python -m virtualenv ENV\"')\n setup_params = {}\n else:\n script = 'scripts/virtualenv'\n setup_params = {'scripts': [script]}\n\n\ndef read_file(*paths):\n here = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(here, *paths)) as f:\n return f.read()\n\n# Get long_description from index.rst:\nlong_description = read_file('docs', 'index.rst')\nlong_description = long_description.strip().split('split here', 1)[0]\n# Add release history\nchanges = read_file('docs', 'changes.rst')\n# Only report last two releases for brevity\nreleases_found = 0\nchange_lines = []\nfor line in changes.splitlines():\n change_lines.append(line)\n if line.startswith('--------------'):\n releases_found += 1\n if releases_found > 2:\n break\n\nchanges = '\\n'.join(change_lines[:-2]) + '\\n'\nchanges += '`Full Changelog <https://virtualenv.pypa.io/en/latest/changes.html>`_.'\n# Replace issue/pull directives\nchanges = re.sub(r':pull:`(\\d+)`', r'PR #\\1', changes)\nchanges = re.sub(r':issue:`(\\d+)`', r'#\\1', changes)\n\nlong_description += '\\n\\n' + changes\n\n\ndef get_version():\n version_file = read_file('virtualenv.py')\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\n# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on\n# exit of python setup.py test # in multiprocessing/util.py _exit_function when\n# 
running python setup.py test (see\n# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa\nexcept ImportError:\n pass\n\nsetup(\n name='virtualenv',\n version=get_version(),\n description=\"Virtual Python Environment builder\",\n long_description=long_description,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='setuptools deployment installation distutils',\n author='Ian Bicking',\n author_email='[email protected]',\n maintainer='Jannis Leidel, Carl Meyer and Brian Rosner',\n maintainer_email='[email protected]',\n url='https://virtualenv.pypa.io/',\n license='MIT',\n py_modules=['virtualenv'],\n packages=['virtualenv_support'],\n package_data={'virtualenv_support': ['*.whl']},\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*',\n **setup_params)\n", "path": "setup.py"}]} | 1,865 | 206 |
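
Shown standalone, the version constraint from the diff above: `python_requires` takes PEP 440 specifiers, so dropping 3.3 while keeping 2.7 means adding one more exclusion rather than raising the floor. The package name below is a placeholder:

```python
from setuptools import setup

setup(
    name="example-package",  # placeholder, not the real project name
    version="0.0.0",
    # Refuse 3.0-3.3 (wheel 0.31 dropped 3.3) while still allowing 2.7.
    python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
)
```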
gh_patches_debug_8070 | rasdani/github-patches | git_diff | mindsdb__mindsdb-130 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cleanup before releasing v.1.0
* Remove any unused files
* Remove any unused variables and functions (loads of those in config, I'd think)
* Remove any unused requirements
* Update the version for requirements
* Update and standardize the interface of older functions and add comments to them (probably won't have time for all, but at least some)
</issue>
<code>
[start of setup.py]
1 import setuptools
2
3 about = {}
4 with open("mindsdb/__about__.py") as fp:
5 exec(fp.read(), about)
6
7 long_description = open('README.md', encoding='utf-8').read()
8
9 with open('requirements.txt') as req_file:
10 requirements = req_file.read().splitlines()
11
12 setuptools.setup(
13 name=about['__title__'],
14 version=about['__version__'],
15 url=about['__github__'],
16 download_url=about['__pypi__'],
17 license=about['__license__'],
18 author=about['__author__'],
19 author_email=about['__email__'],
20 description=about['__description__'],
21 long_description=long_description,
22 long_description_content_type="text/markdown",
23 packages=setuptools.find_packages(),
24 install_requires=requirements,
25 classifiers=(
26 "Programming Language :: Python :: 3",
27 "License :: OSI Approved :: MIT License",
28 "Operating System :: OS Independent",
29 ),
30 python_requires=">=3.3"
31 )
32
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
import setuptools
+import subprocess
about = {}
with open("mindsdb/__about__.py") as fp:
@@ -27,5 +28,10 @@
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
- python_requires=">=3.3"
+ python_requires=">=3.6"
)
+
+try:
+ subprocess.call(['python3','-m','spacy','download','en'])
+except:
+ subprocess.call(['python','-m','spacy','download','en'])
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,5 @@\n import setuptools\n+import subprocess\n \n about = {}\n with open(\"mindsdb/__about__.py\") as fp:\n@@ -27,5 +28,10 @@\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ),\n- python_requires=\">=3.3\"\n+ python_requires=\">=3.6\"\n )\n+\n+try:\n+ subprocess.call(['python3','-m','spacy','download','en'])\n+except:\n+ subprocess.call(['python','-m','spacy','download','en'])\n", "issue": "Cleanup before releasing v.1.0\n* Remove any unused files\r\n* Remove any unused variable and functions (loads of those in config I'd think)\r\n* Remove any unused requirements\r\n* Update the version for requirements\r\n* Update and standardize the interface of older functions and add comments to them (probably won't have time for all, but at least some)\n", "before_files": [{"content": "import setuptools\n\nabout = {}\nwith open(\"mindsdb/__about__.py\") as fp:\n exec(fp.read(), about)\n\nlong_description = open('README.md', encoding='utf-8').read()\n\nwith open('requirements.txt') as req_file:\n requirements = req_file.read().splitlines()\n\nsetuptools.setup(\n name=about['__title__'],\n version=about['__version__'],\n url=about['__github__'],\n download_url=about['__pypi__'],\n license=about['__license__'],\n author=about['__author__'],\n author_email=about['__email__'],\n description=about['__description__'],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n install_requires=requirements,\n classifiers=(\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ),\n python_requires=\">=3.3\"\n)\n", "path": "setup.py"}]} | 869 | 149 |
gh_patches_debug_4948 | rasdani/github-patches | git_diff | ansible__ansible-11626 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
devel bug - profile_tasks.py requires CALLBACK_ constants or the display param is not passed
relates to #11625 and commit a09f623
```
$ ansible-playbook plays/test_to_json.yml -vvv
Using /Users/glynch/vagrant/ansible_foo/ansible.cfg as config file
1 plays in plays/test_to_json.yml
[ERROR]: Unexpected Exception: __init__() takes exactly 2 arguments (1 given)
the full traceback was:
Traceback (most recent call last):
File "/Users/glynch/dev/ansible/bin/ansible-playbook", line 77, in <module>
sys.exit(cli.run())
File "/Users/glynch/dev/ansible/lib/ansible/cli/playbook.py", line 162, in run
results = pbex.run()
File "/Users/glynch/dev/ansible/lib/ansible/executor/playbook_executor.py", line 128, in run
self._tqm.load_callbacks()
File "/Users/glynch/dev/ansible/lib/ansible/executor/task_queue_manager.py", line 154, in load_callbacks
self._callback_plugins.append(callback_plugin())
TypeError: __init__() takes exactly 2 arguments (1 given)
```
</issue>
<code>
[start of lib/ansible/plugins/callback/profile_tasks.py]
1 # (C) 2015, Tom Paine, <[email protected]>
2 # (C) 2014, Jharrod LaFon, @JharrodLaFon
3 # (C) 2012-2013, Michael DeHaan, <[email protected]>
4 #
5 # This file is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # File is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # See <http://www.gnu.org/licenses/> for a copy of the
16 # GNU General Public License
17
18 # Provides per-task timing, ongoing playbook elapsed time and
19 # ordered list of top 20 longest running tasks at end
20
21 import time
22
23 from ansible.plugins.callback import CallbackBase
24
25 # define start time
26 t0 = tn = time.time()
27
28 def secondsToStr(t):
29 # http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds
30 rediv = lambda ll, b: list(divmod(ll[0], b)) + ll[1:]
31 return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60]))
32
33
34 def filled(msg, fchar="*"):
35 if len(msg) == 0:
36 width = 79
37 else:
38 msg = "%s " % msg
39 width = 79 - len(msg)
40 if width < 3:
41 width = 3
42 filler = fchar * width
43 return "%s%s " % (msg, filler)
44
45
46 def timestamp(self):
47 if self.current is not None:
48 self.stats[self.current] = time.time() - self.stats[self.current]
49
50
51 def tasktime():
52 global tn
53 time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
54 time_elapsed = secondsToStr(time.time() - tn)
55 time_total_elapsed = secondsToStr(time.time() - t0)
56 display(filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed)))
57 tn = time.time()
58
59
60 class CallbackModule(CallbackBase):
61
62 def __init__(self, display):
63 self.stats = {}
64 self.current = None
65
66 super(CallbackModule, self).__init__(display)
67
68
69 def playbook_on_task_start(self, name, is_conditional):
70 """
71 Logs the start of each task
72 """
73 tasktime()
74 timestamp(self)
75
76 # Record the start time of the current task
77 self.current = name
78 self.stats[self.current] = time.time()
79
80 def playbook_on_setup(self):
81 tasktime()
82
83 def playbook_on_stats(self, stats):
84 tasktime()
85 display(filled("", fchar="="))
86
87 timestamp(self)
88
89 # Sort the tasks by their running time
90 results = sorted(
91 self.stats.items(),
92 key=lambda value: value[1],
93 reverse=True,
94 )
95
96 # Just keep the top 20
97 results = results[:20]
98
99 # Print the timings
100 for name, elapsed in results:
101 self.display.display(
102 "{0:-<70}{1:->9}".format(
103 '{0} '.format(name),
104 ' {0:.02f}s'.format(elapsed),
105 )
106 )
107
[end of lib/ansible/plugins/callback/profile_tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py
--- a/lib/ansible/plugins/callback/profile_tasks.py
+++ b/lib/ansible/plugins/callback/profile_tasks.py
@@ -58,7 +58,14 @@
class CallbackModule(CallbackBase):
-
+ """
+ This callback module provides per-task timing, ongoing playbook elapsed time
+ and ordered list of top 20 longest running tasks at end.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'profile_tasks'
+
def __init__(self, display):
self.stats = {}
self.current = None
| {"golden_diff": "diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py\n--- a/lib/ansible/plugins/callback/profile_tasks.py\n+++ b/lib/ansible/plugins/callback/profile_tasks.py\n@@ -58,7 +58,14 @@\n \n \n class CallbackModule(CallbackBase):\n-\n+ \"\"\"\n+ This callback module provides per-task timing, ongoing playbook elapsed time \n+ and ordered list of top 20 longest running tasks at end.\n+ \"\"\"\n+ CALLBACK_VERSION = 2.0\n+ CALLBACK_TYPE = 'aggregate'\n+ CALLBACK_NAME = 'profile_tasks'\n+ \n def __init__(self, display):\n self.stats = {}\n self.current = None\n", "issue": "devel bug - profile_tasks.py requires CALLBACK_ constants or the display param is not passed\nrelates to #11625 and commit a09f623\n\n```\n$ ansible-playbook plays/test_to_json.yml -vvv\nUsing /Users/glynch/vagrant/ansible_foo/ansible.cfg as config file\n1 plays in plays/test_to_json.yml\n [ERROR]: Unexpected Exception: __init__() takes exactly 2 arguments (1 given)\n\nthe full traceback was:\n\nTraceback (most recent call last):\n File \"/Users/glynch/dev/ansible/bin/ansible-playbook\", line 77, in <module>\n sys.exit(cli.run())\n File \"/Users/glynch/dev/ansible/lib/ansible/cli/playbook.py\", line 162, in run\n results = pbex.run()\n File \"/Users/glynch/dev/ansible/lib/ansible/executor/playbook_executor.py\", line 128, in run\n self._tqm.load_callbacks()\n File \"/Users/glynch/dev/ansible/lib/ansible/executor/task_queue_manager.py\", line 154, in load_callbacks\n self._callback_plugins.append(callback_plugin())\nTypeError: __init__() takes exactly 2 arguments (1 given)\n```\n\n", "before_files": [{"content": "# (C) 2015, Tom Paine, <[email protected]>\n# (C) 2014, Jharrod LaFon, @JharrodLaFon\n# (C) 2012-2013, Michael DeHaan, <[email protected]>\n#\n# This file is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# File is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# See <http://www.gnu.org/licenses/> for a copy of the\n# GNU General Public License\n\n# Provides per-task timing, ongoing playbook elapsed time and\n# ordered list of top 20 longest running tasks at end\n\nimport time\n\nfrom ansible.plugins.callback import CallbackBase\n\n# define start time\nt0 = tn = time.time()\n\ndef secondsToStr(t):\n # http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds\n rediv = lambda ll, b: list(divmod(ll[0], b)) + ll[1:]\n return \"%d:%02d:%02d.%03d\" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60]))\n\n\ndef filled(msg, fchar=\"*\"):\n if len(msg) == 0:\n width = 79\n else:\n msg = \"%s \" % msg\n width = 79 - len(msg)\n if width < 3:\n width = 3\n filler = fchar * width\n return \"%s%s \" % (msg, filler)\n\n\ndef timestamp(self):\n if self.current is not None:\n self.stats[self.current] = time.time() - self.stats[self.current]\n\n\ndef tasktime():\n global tn\n time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')\n time_elapsed = secondsToStr(time.time() - tn)\n time_total_elapsed = secondsToStr(time.time() - t0)\n display(filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed)))\n tn = time.time()\n\n\nclass CallbackModule(CallbackBase):\n\n def __init__(self, display):\n self.stats = {}\n self.current = None\n\n super(CallbackModule, self).__init__(display)\n\n\n def playbook_on_task_start(self, name, is_conditional):\n \"\"\"\n Logs the start of each task\n \"\"\"\n tasktime()\n timestamp(self)\n\n # Record the start time of the current task\n self.current = name\n self.stats[self.current] = time.time()\n\n def playbook_on_setup(self):\n tasktime()\n\n def playbook_on_stats(self, stats):\n tasktime()\n display(filled(\"\", fchar=\"=\"))\n\n timestamp(self)\n\n # Sort the tasks by their running time\n results = sorted(\n self.stats.items(),\n key=lambda value: value[1],\n reverse=True,\n )\n\n # Just keep the top 20\n results = results[:20]\n\n # Print the timings\n for name, elapsed in results:\n self.display.display(\n \"{0:-<70}{1:->9}\".format(\n '{0} '.format(name),\n ' {0:.02f}s'.format(elapsed),\n )\n )\n", "path": "lib/ansible/plugins/callback/profile_tasks.py"}]} | 1,863 | 153 |
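
The diff resolves the `TypeError` by adding the v2 callback metadata. Below is a bare skeleton of a plugin carrying those attributes; the interpretation that the loader keys off them to pass `display` is an assumption drawn from the linked commit, not verified here:

```python
from ansible.plugins.callback import CallbackBase

class CallbackModule(CallbackBase):
    """Minimal 2.0-style callback plugin skeleton."""

    # Presumed to mark the plugin as 2.0-style for the devel loader.
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'profile_tasks'

    def __init__(self, display):
        super(CallbackModule, self).__init__(display)
```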
gh_patches_debug_8110 | rasdani/github-patches | git_diff | OCA__bank-payment-48 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash in account_payment_purchase
When you create an invoice from incoming products on v7 with account_payment_purchase:
File "/home/erp_super/erp/prod/src/addons/stock/stock.py", line 1153, in action_invoice_create
invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)
File "/home/erp_super/erp/prod/src/banking/account_payment_sale_stock/model/stock.py", line 34, in _prepare_invoice
cr, uid, picking, partner, inv_type, journal_id, context=context)
File "/home/erp_super/erp/prod/src/banking/account_payment_purchase/model/stock.py", line 38, in _prepare_invoice
picking.purchase_id.supplier_partner_bank.id or False,
File "/home/erp_super/erp/prod/src/server/openerp/osv/orm.py", line 504, in __getattr__
raise AttributeError(e)
AttributeError: "Field 'supplier_partner_bank' does not exist in object 'browse_record(purchase.order, 774)'"
It seems that, when we decided to change the datamodel of the account_payment_* modules, the file account_payment_purchase/model/stock.py was not changed! I'll do a PR to fix this.
</issue>
<code>
[start of account_payment_purchase/model/stock.py]
1 # -*- encoding: utf-8 -*-
2 ##############################################################################
3 #
4 # Account Payment Purchase module for OpenERP
5 # Copyright (C) 2014 Akretion (http://www.akretion.com)
6 # @author Alexis de Lattre <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Affero General Public License as
10 # published by the Free Software Foundation, either version 3 of the
11 # License, or (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Affero General Public License for more details.
17 #
18 # You should have received a copy of the GNU Affero General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21 ##############################################################################
22
23 from openerp.osv import orm
24
25
26 class stock_picking(orm.Model):
27 _inherit = "stock.picking"
28
29 def _prepare_invoice(
30 self, cr, uid, picking, partner, inv_type, journal_id,
31 context=None):
32 """Copy bank partner and payment type from PO to invoice"""
33 invoice_vals = super(stock_picking, self)._prepare_invoice(
34 cr, uid, picking, partner, inv_type, journal_id, context=context)
35 if picking.purchase_id:
36 invoice_vals.update({
37 'partner_bank_id':
38 picking.purchase_id.supplier_partner_bank.id or False,
39 'payment_mode_type':
40 picking.purchase_id.payment_mode_type.id or False,
41 })
42 return invoice_vals
43
[end of account_payment_purchase/model/stock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/account_payment_purchase/model/stock.py b/account_payment_purchase/model/stock.py
--- a/account_payment_purchase/model/stock.py
+++ b/account_payment_purchase/model/stock.py
@@ -35,8 +35,8 @@
if picking.purchase_id:
invoice_vals.update({
'partner_bank_id':
- picking.purchase_id.supplier_partner_bank.id or False,
- 'payment_mode_type':
- picking.purchase_id.payment_mode_type.id or False,
+ picking.purchase_id.supplier_partner_bank_id.id or False,
+ 'payment_mode_id':
+ picking.purchase_id.payment_mode_id.id or False,
})
return invoice_vals
| {"golden_diff": "diff --git a/account_payment_purchase/model/stock.py b/account_payment_purchase/model/stock.py\n--- a/account_payment_purchase/model/stock.py\n+++ b/account_payment_purchase/model/stock.py\n@@ -35,8 +35,8 @@\n if picking.purchase_id:\n invoice_vals.update({\n 'partner_bank_id':\n- picking.purchase_id.supplier_partner_bank.id or False,\n- 'payment_mode_type':\n- picking.purchase_id.payment_mode_type.id or False,\n+ picking.purchase_id.supplier_partner_bank_id.id or False,\n+ 'payment_mode_id':\n+ picking.purchase_id.payment_mode_id.id or False,\n })\n return invoice_vals\n", "issue": "Crash in account_payment_purchase\nWhen you create an invoice from an incoming products on v7 with account_payment_purchase:\n\n File \"/home/erp_super/erp/prod/src/addons/stock/stock.py\", line 1153, in action_invoice_create\n invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)\n File \"/home/erp_super/erp/prod/src/banking/account_payment_sale_stock/model/stock.py\", line 34, in _prepare_invoice\n cr, uid, picking, partner, inv_type, journal_id, context=context)\n File \"/home/erp_super/erp/prod/src/banking/account_payment_purchase/model/stock.py\", line 38, in _prepare_invoice\n picking.purchase_id.supplier_partner_bank.id or False,\n File \"/home/erp_super/erp/prod/src/server/openerp/osv/orm.py\", line 504, in __getattr__\n raise AttributeError(e)\nAttributeError: \"Field 'supplier_partner_bank' does not exist in object 'browse_record(purchase.order, 774)'\"\n\nIt seems that, when we decided to change to datamodel of the account_payment_\\* modules, the file account_payment_purchase/model/stock.py was not changed !!! I'll do a PR to fix this.\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Account Payment Purchase module for OpenERP\n# Copyright (C) 2014 Akretion (http://www.akretion.com)\n# @author Alexis de Lattre <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import orm\n\n\nclass stock_picking(orm.Model):\n _inherit = \"stock.picking\"\n\n def _prepare_invoice(\n self, cr, uid, picking, partner, inv_type, journal_id,\n context=None):\n \"\"\"Copy bank partner and payment type from PO to invoice\"\"\"\n invoice_vals = super(stock_picking, self)._prepare_invoice(\n cr, uid, picking, partner, inv_type, journal_id, context=context)\n if picking.purchase_id:\n invoice_vals.update({\n 'partner_bank_id':\n picking.purchase_id.supplier_partner_bank.id or False,\n 'payment_mode_type':\n picking.purchase_id.payment_mode_type.id or False,\n })\n return invoice_vals\n", "path": "account_payment_purchase/model/stock.py"}]} | 1,276 | 142 |
gh_patches_debug_19014 | rasdani/github-patches | git_diff | mne-tools__mne-python-9070 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use bibtex in plot_sensor_noise_level.py
Convert the references in `examples/visualization/plot_sensor_noise_level.py` to use footcite / footbibliography.
</issue>
<code>
[start of examples/visualization/plot_sensor_noise_level.py]
1 # -*- coding: utf-8 -*-
2 """
3 ======================================
4 Show noise levels from empty room data
5 ======================================
6
7 This shows how to use :meth:`mne.io.Raw.plot_psd` to examine noise levels
8 of systems. See [1]_ for an example.
9
10 References
11 ----------
12 .. [1] Khan S, Cohen D (2013). Note: Magnetic noise from the inner wall of
13 a magnetically shielded room. Review of Scientific Instruments 84:56101.
14 https://doi.org/10.1063/1.4802845
15 """
16 # Author: Eric Larson <[email protected]>
17 #
18 # License: BSD (3-clause)
19
20 import os.path as op
21 import mne
22
23 data_path = mne.datasets.sample.data_path()
24
25 raw_erm = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample',
26 'ernoise_raw.fif'), preload=True)
27
28 ###############################################################################
29 # We can plot the absolute noise levels:
30 raw_erm.plot_psd(tmax=10., average=True, spatial_colors=False,
31 dB=False, xscale='log')
32
[end of examples/visualization/plot_sensor_noise_level.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/visualization/plot_sensor_noise_level.py b/examples/visualization/plot_sensor_noise_level.py
--- a/examples/visualization/plot_sensor_noise_level.py
+++ b/examples/visualization/plot_sensor_noise_level.py
@@ -5,13 +5,7 @@
======================================
This shows how to use :meth:`mne.io.Raw.plot_psd` to examine noise levels
-of systems. See [1]_ for an example.
-
-References
-----------
-.. [1] Khan S, Cohen D (2013). Note: Magnetic noise from the inner wall of
- a magnetically shielded room. Review of Scientific Instruments 84:56101.
- https://doi.org/10.1063/1.4802845
+of systems. See :footcite:`KhanCohen2013` for an example.
"""
# Author: Eric Larson <[email protected]>
#
@@ -29,3 +23,8 @@
# We can plot the absolute noise levels:
raw_erm.plot_psd(tmax=10., average=True, spatial_colors=False,
dB=False, xscale='log')
+###############################################################################
+# References
+# ----------
+#
+# .. footbibliography::
| {"golden_diff": "diff --git a/examples/visualization/plot_sensor_noise_level.py b/examples/visualization/plot_sensor_noise_level.py\n--- a/examples/visualization/plot_sensor_noise_level.py\n+++ b/examples/visualization/plot_sensor_noise_level.py\n@@ -5,13 +5,7 @@\n ======================================\n \n This shows how to use :meth:`mne.io.Raw.plot_psd` to examine noise levels\n-of systems. See [1]_ for an example.\n-\n-References\n-----------\n-.. [1] Khan S, Cohen D (2013). Note: Magnetic noise from the inner wall of\n- a magnetically shielded room. Review of Scientific Instruments 84:56101.\n- https://doi.org/10.1063/1.4802845\n+of systems. See :footcite:`KhanCohen2013` for an example.\n \"\"\"\n # Author: Eric Larson <[email protected]>\n #\n@@ -29,3 +23,8 @@\n # We can plot the absolute noise levels:\n raw_erm.plot_psd(tmax=10., average=True, spatial_colors=False,\n dB=False, xscale='log')\n+###############################################################################\n+# References\n+# ----------\n+#\n+# .. footbibliography::\n", "issue": "use bibtex in plot_sensor_noise_level.py\nconvert references in `examples/visualization/plot_sensor_noise_level.py` to use footcite / footbibliography\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n======================================\nShow noise levels from empty room data\n======================================\n\nThis shows how to use :meth:`mne.io.Raw.plot_psd` to examine noise levels\nof systems. See [1]_ for an example.\n\nReferences\n----------\n.. [1] Khan S, Cohen D (2013). Note: Magnetic noise from the inner wall of\n a magnetically shielded room. Review of Scientific Instruments 84:56101.\n https://doi.org/10.1063/1.4802845\n\"\"\"\n# Author: Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport mne\n\ndata_path = mne.datasets.sample.data_path()\n\nraw_erm = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample',\n 'ernoise_raw.fif'), preload=True)\n\n###############################################################################\n# We can plot the absolute noise levels:\nraw_erm.plot_psd(tmax=10., average=True, spatial_colors=False,\n dB=False, xscale='log')\n", "path": "examples/visualization/plot_sensor_noise_level.py"}]} | 887 | 278 |
gh_patches_debug_60781 | rasdani/github-patches | git_diff | pypa__cibuildwheel-1065 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Docs] Example may accidentally be encouraging users to write invalid `pyproject.toml` files
### Description
Hi guys, first of all thank you for the amazing project (always good to remember).
While I was working on adding support for PEP 621 to setuptools, I noticed a series of users having problems with invalid `pyproject.toml` files. The common pattern seems to be an almost empty `[project]` table with only a `requires-python` field set, which is invalid according to PEP 621.
It took me some time to find the reason for this behaviour but I think it comes from:
https://cibuildwheel.readthedocs.io/en/stable/options/#requires-python
I suspect that the example and notes about the preferred way of setting the config have been encouraging users who are unaware of PEP 621 to write technically invalid `pyproject.toml` files.
Please note that this issue is not necessarily related to setuptools itself.
The existence of the `[project]` table in the `pyproject.toml` allows (according to the standard) the installer/builder/consumer program to treat the package differently (especially regarding `dynamic`).
I think it would be nice to at least add a note about this so users become aware of the implications of adding a `[project]` table.
### Build log
_No response_
### CI config
_No response_
</issue>
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras = {
4 "docs": [
5 "mkdocs-include-markdown-plugin==2.8.0",
6 "mkdocs==1.0.4",
7 "pymdown-extensions",
8 "mkdocs-macros-plugin",
9 ],
10 "test": [
11 "jinja2",
12 "pytest>=6",
13 "pytest-timeout",
14 "pytest-xdist",
15 ],
16 "bin": [
17 "click",
18 "ghapi",
19 "pip-tools",
20 "pygithub",
21 "pyyaml",
22 "requests",
23 "rich>=9.6",
24 "packaging>=21.0",
25 ],
26 "mypy": [
27 "mypy>=0.901",
28 "types-jinja2",
29 "types-certifi",
30 "types-toml",
31 "types-jinja2",
32 "types-pyyaml",
33 "types-click",
34 "types-requests",
35 ],
36 }
37
38 extras["dev"] = [
39 *extras["mypy"],
40 *extras["test"],
41 *extras["bin"],
42 ]
43
44 extras["all"] = sum(extras.values(), [])
45
46 setup(extras_require=extras)
47
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,6 +4,7 @@
"docs": [
"mkdocs-include-markdown-plugin==2.8.0",
"mkdocs==1.0.4",
+ "jinja2==3.0.3",
"pymdown-extensions",
"mkdocs-macros-plugin",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,6 +4,7 @@\n \"docs\": [\n \"mkdocs-include-markdown-plugin==2.8.0\",\n \"mkdocs==1.0.4\",\n+ \"jinja2==3.0.3\",\n \"pymdown-extensions\",\n \"mkdocs-macros-plugin\",\n ],\n", "issue": "[Docs] Example may accidentally be encouraging users to write invalid `pyproject.toml` files\n### Description\r\n\r\nHi guys, first of all thank you for the amazing project (always good to remember).\r\n\r\nWhile I was working on adding support for PEP 621 to setuptools, I noticed a series of users having problems with invalid `pyproject.toml` files. The common pattern seem to be a almost empty `[project]` table with only a `requires-python` field set, which is invalid according to PEP 621.\r\n\r\nIt took me some time to find the reason for this behaviour but I think it comes from:\r\nhttps://cibuildwheel.readthedocs.io/en/stable/options/#requires-python\r\n\r\nI suspect that the example and notes about the preferred way of setting the config has been encouraging users that are unaware of PEP 621 to write technically invalid `pyproject.toml` files.\r\n\r\nPlease note that this issue is not necessarily related to setuptools itself.\r\nThe existence of the `[project]` table in the `pyproject.toml` allows (according to the standard) the installer/builder/consumer program to treat the package differently (specially regarding `dynamic`).\r\n\r\nI think it would be nice to at least add a note about this so users became aware of the implications of adding a `[project]` table.\r\n\r\n### Build log\r\n\r\n_No response_\r\n\r\n### CI config\r\n\r\n_No response_\n", "before_files": [{"content": "from setuptools import setup\n\nextras = {\n \"docs\": [\n \"mkdocs-include-markdown-plugin==2.8.0\",\n \"mkdocs==1.0.4\",\n \"pymdown-extensions\",\n \"mkdocs-macros-plugin\",\n ],\n \"test\": [\n \"jinja2\",\n \"pytest>=6\",\n \"pytest-timeout\",\n \"pytest-xdist\",\n ],\n \"bin\": [\n \"click\",\n \"ghapi\",\n \"pip-tools\",\n \"pygithub\",\n \"pyyaml\",\n \"requests\",\n \"rich>=9.6\",\n \"packaging>=21.0\",\n ],\n \"mypy\": [\n \"mypy>=0.901\",\n \"types-jinja2\",\n \"types-certifi\",\n \"types-toml\",\n \"types-jinja2\",\n \"types-pyyaml\",\n \"types-click\",\n \"types-requests\",\n ],\n}\n\nextras[\"dev\"] = [\n *extras[\"mypy\"],\n *extras[\"test\"],\n *extras[\"bin\"],\n]\n\nextras[\"all\"] = sum(extras.values(), [])\n\nsetup(extras_require=extras)\n", "path": "setup.py"}]} | 1,163 | 95 |
gh_patches_debug_19725 | rasdani/github-patches | git_diff | streamlink__streamlink-3247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't find "tv3cat" plugin
## Plugin Issue
<!-- Replace [ ] with [x] in order to check the box -->
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
<!-- Explain the plugin issue as thoroughly as you can. -->
An image is worth a thousand words.

### Reproduction steps
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
I'm just using the "python3-streamlink" package provided by Fedora 32.
Thanks!
</issue>
<code>
[start of src/streamlink/plugins/tv3cat.py]
1 import logging
2 import re
3
4 from streamlink.plugin import Plugin, PluginError
5 from streamlink.stream import HLSStream
6 from streamlink.plugin.api import validate
7
8 log = logging.getLogger(__name__)
9
10
11 class TV3Cat(Plugin):
12 _url_re = re.compile(r"http://(?:www.)?ccma.cat/tv3/directe/(.+?)/")
13 _stream_info_url = "http://dinamics.ccma.cat/pvideo/media.jsp" \
14 "?media=video&version=0s&idint={ident}&profile=pc&desplacament=0"
15 _media_schema = validate.Schema({
16 "geo": validate.text,
17 "url": validate.url(scheme=validate.any("http", "https"))
18 })
19 _channel_schema = validate.Schema({
20 "media": validate.any([_media_schema], _media_schema)},
21 validate.get("media"),
22 # If there is only one item, it's not a list ... silly
23 validate.transform(lambda x: x if isinstance(x, list) else [x])
24 )
25
26 @classmethod
27 def can_handle_url(cls, url):
28 return cls._url_re.match(url) is not None
29
30 def _get_streams(self):
31 match = self._url_re.match(self.url)
32 if match:
33 ident = match.group(1)
34 data_url = self._stream_info_url.format(ident=ident)
35 stream_infos = self.session.http.json(self.session.http.get(data_url), schema=self._channel_schema)
36
37 for stream in stream_infos:
38 try:
39 return HLSStream.parse_variant_playlist(self.session, stream['url'], name_fmt="{pixels}_{bitrate}")
40 except PluginError:
41 log.debug("Failed to get streams for: {0}".format(stream['geo']))
42 pass
43
44
45 __plugin__ = TV3Cat
46
[end of src/streamlink/plugins/tv3cat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/tv3cat.py b/src/streamlink/plugins/tv3cat.py
--- a/src/streamlink/plugins/tv3cat.py
+++ b/src/streamlink/plugins/tv3cat.py
@@ -9,7 +9,7 @@
class TV3Cat(Plugin):
- _url_re = re.compile(r"http://(?:www.)?ccma.cat/tv3/directe/(.+?)/")
+ _url_re = re.compile(r"https?://(?:www\.)?ccma\.cat/tv3/directe/(.+?)/")
_stream_info_url = "http://dinamics.ccma.cat/pvideo/media.jsp" \
"?media=video&version=0s&idint={ident}&profile=pc&desplacament=0"
_media_schema = validate.Schema({
@@ -39,7 +39,6 @@
return HLSStream.parse_variant_playlist(self.session, stream['url'], name_fmt="{pixels}_{bitrate}")
except PluginError:
log.debug("Failed to get streams for: {0}".format(stream['geo']))
- pass
__plugin__ = TV3Cat
| {"golden_diff": "diff --git a/src/streamlink/plugins/tv3cat.py b/src/streamlink/plugins/tv3cat.py\n--- a/src/streamlink/plugins/tv3cat.py\n+++ b/src/streamlink/plugins/tv3cat.py\n@@ -9,7 +9,7 @@\n \n \n class TV3Cat(Plugin):\n- _url_re = re.compile(r\"http://(?:www.)?ccma.cat/tv3/directe/(.+?)/\")\n+ _url_re = re.compile(r\"https?://(?:www\\.)?ccma\\.cat/tv3/directe/(.+?)/\")\n _stream_info_url = \"http://dinamics.ccma.cat/pvideo/media.jsp\" \\\n \"?media=video&version=0s&idint={ident}&profile=pc&desplacament=0\"\n _media_schema = validate.Schema({\n@@ -39,7 +39,6 @@\n return HLSStream.parse_variant_playlist(self.session, stream['url'], name_fmt=\"{pixels}_{bitrate}\")\n except PluginError:\n log.debug(\"Failed to get streams for: {0}\".format(stream['geo']))\n- pass\n \n \n __plugin__ = TV3Cat\n", "issue": "Can't find \"tv3cat\" plugin\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [ X ] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\n\r\nA image is worth a thousand words.\r\n\r\n\r\n\r\n### Reproduction steps \r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\nI've just using the \"python3-streamlink\" package provided by Fedora 32\r\n\r\nThanks!\r\n\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin, PluginError\nfrom streamlink.stream import HLSStream\nfrom streamlink.plugin.api import validate\n\nlog = logging.getLogger(__name__)\n\n\nclass TV3Cat(Plugin):\n _url_re = re.compile(r\"http://(?:www.)?ccma.cat/tv3/directe/(.+?)/\")\n _stream_info_url = \"http://dinamics.ccma.cat/pvideo/media.jsp\" \\\n \"?media=video&version=0s&idint={ident}&profile=pc&desplacament=0\"\n _media_schema = validate.Schema({\n \"geo\": validate.text,\n \"url\": validate.url(scheme=validate.any(\"http\", \"https\"))\n })\n _channel_schema = validate.Schema({\n \"media\": validate.any([_media_schema], _media_schema)},\n validate.get(\"media\"),\n # If there is only one item, it's not a list ... silly\n validate.transform(lambda x: x if isinstance(x, list) else [x])\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n match = self._url_re.match(self.url)\n if match:\n ident = match.group(1)\n data_url = self._stream_info_url.format(ident=ident)\n stream_infos = self.session.http.json(self.session.http.get(data_url), schema=self._channel_schema)\n\n for stream in stream_infos:\n try:\n return HLSStream.parse_variant_playlist(self.session, stream['url'], name_fmt=\"{pixels}_{bitrate}\")\n except PluginError:\n log.debug(\"Failed to get streams for: {0}\".format(stream['geo']))\n pass\n\n\n__plugin__ = TV3Cat\n", "path": "src/streamlink/plugins/tv3cat.py"}]} | 1,204 | 252 |
gh_patches_debug_67164 | rasdani/github-patches | git_diff | keras-team__keras-2268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Textual information for labels?
I seem unable to use text for labels while using `to_categorical`.
```
Using Theano backend.
Traceback (most recent call last):
File "playground.py", line 88, in <module>
train_model_and_test(number_of_epochs, number_of_classes, train_data, train_label, augmented_data_generator)
File "playground.py", line 62, in train_model_and_test
train_label = np_utils.to_categorical(train_label, number_of_classes)
File "/usr/local/lib/python2.7/dist-packages/keras/utils/np_utils.py", line 12, in to_categorical
y = np.asarray(y, dtype='int32')
File "/usr/lib/python2.7/dist-packages/numpy/core/numeric.py", line 460, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: invalid literal for int() with base 10: 'yellow'
```
</issue>
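Before looking at the fix, a common workaround is to map string labels to integer indices first. A minimal sketch of that idea using only NumPy (the label values here are invented for illustration):

```python
import numpy as np

labels = ["yellow", "red", "yellow", "blue"]  # hypothetical string labels

# np.unique returns the sorted classes plus each label's index into them.
classes, y = np.unique(labels, return_inverse=True)

# One-hot encode the integer indices, mirroring what to_categorical does.
Y = np.zeros((len(y), len(classes)))
Y[np.arange(len(y)), y] = 1.0
print(classes)  # ['blue' 'red' 'yellow']
print(Y)
```

(The accepted patch below simply drops the premature `int32` cast; labels still need to be integer indices before one-hot encoding.)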
<code>
[start of keras/utils/np_utils.py]
1 from __future__ import absolute_import
2 import numpy as np
3 import scipy as sp
4 from six.moves import range
5 from six.moves import zip
6
7
8 def to_categorical(y, nb_classes=None):
9 '''Convert class vector (integers from 0 to nb_classes)
10 to binary class matrix, for use with categorical_crossentropy.
11 '''
12 y = np.asarray(y, dtype='int32')
13 if not nb_classes:
14 nb_classes = np.max(y)+1
15 Y = np.zeros((len(y), nb_classes))
16 for i in range(len(y)):
17 Y[i, y[i]] = 1.
18 return Y
19
20
21 def normalize(a, axis=-1, order=2):
22 l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
23 l2[l2 == 0] = 1
24 return a / np.expand_dims(l2, axis)
25
26
27 def binary_logloss(p, y):
28 epsilon = 1e-15
29 p = sp.maximum(epsilon, p)
30 p = sp.minimum(1-epsilon, p)
31 res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
32 res *= -1.0/len(y)
33 return res
34
35
36 def multiclass_logloss(P, Y):
37 npreds = [P[i][Y[i]-1] for i in range(len(Y))]
38 score = -(1. / len(Y)) * np.sum(np.log(npreds))
39 return score
40
41
42 def accuracy(p, y):
43 return np.mean([a == b for a, b in zip(p, y)])
44
45
46 def probas_to_classes(y_pred):
47 if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:
48 return categorical_probas_to_classes(y_pred)
49 return np.array([1 if p > 0.5 else 0 for p in y_pred])
50
51
52 def categorical_probas_to_classes(p):
53 return np.argmax(p, axis=1)
54
[end of keras/utils/np_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras/utils/np_utils.py b/keras/utils/np_utils.py
--- a/keras/utils/np_utils.py
+++ b/keras/utils/np_utils.py
@@ -9,7 +9,6 @@
'''Convert class vector (integers from 0 to nb_classes)
to binary class matrix, for use with categorical_crossentropy.
'''
- y = np.asarray(y, dtype='int32')
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
| {"golden_diff": "diff --git a/keras/utils/np_utils.py b/keras/utils/np_utils.py\n--- a/keras/utils/np_utils.py\n+++ b/keras/utils/np_utils.py\n@@ -9,7 +9,6 @@\n '''Convert class vector (integers from 0 to nb_classes)\n to binary class matrix, for use with categorical_crossentropy.\n '''\n- y = np.asarray(y, dtype='int32')\n if not nb_classes:\n nb_classes = np.max(y)+1\n Y = np.zeros((len(y), nb_classes))\n", "issue": "Textual information for labels?\nI seem unable to use text for labels, whilst using to_categorical\n\n```\nUsing Theano backend.\nTraceback (most recent call last):\n File \"playground.py\", line 88, in <module>\n train_model_and_test(number_of_epochs, number_of_classes, train_data, train_label, augmented_data_generator)\n File \"playground.py\", line 62, in train_model_and_test\n train_label = np_utils.to_categorical(train_label, number_of_classes)\n File \"/usr/local/lib/python2.7/dist-packages/keras/utils/np_utils.py\", line 12, in to_categorical\n y = np.asarray(y, dtype='int32')\n File \"/usr/lib/python2.7/dist-packages/numpy/core/numeric.py\", line 460, in asarray\n return array(a, dtype, copy=False, order=order)\nValueError: invalid literal for int() with base 10: 'yellow'\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\nimport numpy as np\nimport scipy as sp\nfrom six.moves import range\nfrom six.moves import zip\n\n\ndef to_categorical(y, nb_classes=None):\n '''Convert class vector (integers from 0 to nb_classes)\n to binary class matrix, for use with categorical_crossentropy.\n '''\n y = np.asarray(y, dtype='int32')\n if not nb_classes:\n nb_classes = np.max(y)+1\n Y = np.zeros((len(y), nb_classes))\n for i in range(len(y)):\n Y[i, y[i]] = 1.\n return Y\n\n\ndef normalize(a, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n l2[l2 == 0] = 1\n return a / np.expand_dims(l2, axis)\n\n\ndef binary_logloss(p, y):\n epsilon = 1e-15\n p = sp.maximum(epsilon, p)\n p = sp.minimum(1-epsilon, p)\n res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))\n res *= -1.0/len(y)\n return res\n\n\ndef multiclass_logloss(P, Y):\n npreds = [P[i][Y[i]-1] for i in range(len(Y))]\n score = -(1. / len(Y)) * np.sum(np.log(npreds))\n return score\n\n\ndef accuracy(p, y):\n return np.mean([a == b for a, b in zip(p, y)])\n\n\ndef probas_to_classes(y_pred):\n if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:\n return categorical_probas_to_classes(y_pred)\n return np.array([1 if p > 0.5 else 0 for p in y_pred])\n\n\ndef categorical_probas_to_classes(p):\n return np.argmax(p, axis=1)\n", "path": "keras/utils/np_utils.py"}]} | 1,287 | 126 |
gh_patches_debug_32399 | rasdani/github-patches | git_diff | buildbot__buildbot-5169 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
buildbot-2.6.0.gitarchive.tar.gz is not generated correctly
```
<rjarry> tardyp: the .gitarchive.tar.gz file of release 2.6.0 is corrupted
16:57:31 <rjarry> buildbot-v2.5.1.gitarchive.tar.gz
16:57:31 <rjarry> 4.69 MB
16:57:31 <rjarry> buildbot-v2.5.1.gitarchive.tar.gz.sig
16:57:31 <rjarry> 310 Bytes
16:57:47 <rjarry> last one does not
16:57:49 <rjarry> buildbot-2.6.0.gitarchive.tar.gz
16:57:49 <rjarry> 15 Bytes
16:57:49 <rjarry> buildbot-2.6.0.gitarchive.tar.gz.sig
16:57:49 <rjarry> 310 Bytes
```
We shall regenerate the archive and signature manually.
Preferably @p12tic, who originally signed the rest of the release.
This archive is used by Debian packaging, as they are required to build everything from source.
</issue>
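For the manual regeneration mentioned above, a rough sketch (assuming a local clone with the tag present and `git`/`gpg` on PATH; the exact tag name is a guess) that mirrors the signing flags used by the script below:

```python
import os
import subprocess

tag = "v2.6.0"  # hypothetical -- check the repo for the actual tag name
os.makedirs("dist", exist_ok=True)
fn = "dist/buildbot-2.6.0.gitarchive.tar.gz"

# Recreate the archive directly from the tag...
subprocess.run(["git", "archive", "--format=tar.gz", "-o", fn, tag], check=True)
# ...and produce a detached signature next to it, as the script does.
subprocess.run(["gpg", "--output", fn + ".sig", "-b", fn], check=True)
```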
<code>
[start of common/download_release.py]
1 #!/usr/bin/env python3
2
3 import os
4
5 import requests
6 import yaml
7
8
9 def download(url, fn):
10 print(url, fn)
11 if os.path.exists(fn):
12 return
13 with open(fn, 'wb') as f:
14 r = s.get(url, stream=True)
15 for c in r.iter_content(1024):
16 f.write(c)
17
18
19 def main():
20 global s
21 with open(os.path.expanduser("~/.config/hub")) as f:
22 conf = yaml.safe_load(f)
23 token = conf['github.com'][0]['oauth_token']
24
25 s = requests.Session()
26 s.headers.update({'Authorization': 'token ' + token})
27 r = s.get("https://api.github.com/repos/buildbot/buildbot/releases/latest")
28 r.raise_for_status()
29 r = r.json()
30 tag = r['name']
31 upload_url = r['upload_url'].split('{')[0]
32 assets = s.get("https://api.github.com/repos/buildbot/buildbot/releases/{id}/assets".format(id=r['id']))
33 assets.raise_for_status()
34 assets = assets.json()
35 os.system("mkdir -p dist")
36 for url in (a['browser_download_url'] for a in assets):
37 if url.endswith(".whl") or url.endswith(".tar.gz"):
38 fn = os.path.join('dist', url.split('/')[-1])
39 download(url, fn)
40 # download tag archive
41 url = "https://github.com/buildbot/buildbot/archive/{tag}.tar.gz".format(tag=tag)
42 fn = os.path.join('dist', "buildbot-{tag}.gitarchive.tar.gz".format(tag=tag))
43 download(url, fn)
44 sigfn = fn + ".sig"
45 if os.path.exists(sigfn):
46 os.unlink(sigfn)
47 # sign the tag archive for debian
48 os.system("gpg --output {} -b {}".format(sigfn, fn))
49 sigfnbase = os.path.basename(sigfn)
50 r = s.post(upload_url,
51 headers={'Content-Type': "application/pgp-signature"},
52 params={"name": sigfnbase},
53 data=open(sigfn, 'rb'))
54 print(r.content)
55 fnbase = os.path.basename(fn)
56 r = s.post(upload_url,
57 headers={'Content-Type': "application/gzip"},
58 params={"name": fnbase},
59 data=open(fn, 'rb'))
60 print(r.content)
61 # remove files so that twine upload do not upload them
62 os.unlink(sigfn)
63 os.unlink(fn)
64
65
66 if __name__ == '__main__':
67 main()
68
[end of common/download_release.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/common/download_release.py b/common/download_release.py
--- a/common/download_release.py
+++ b/common/download_release.py
@@ -6,18 +6,19 @@
import yaml
-def download(url, fn):
- print(url, fn)
+def download(session, url, fn):
if os.path.exists(fn):
- return
+ print('Removing old file {}'.format(fn))
+ os.unlink(fn)
+ print('Downloading {} from {}'.format(fn, url))
with open(fn, 'wb') as f:
- r = s.get(url, stream=True)
+ r = session.get(url, stream=True)
+ r.raise_for_status()
for c in r.iter_content(1024):
f.write(c)
def main():
- global s
with open(os.path.expanduser("~/.config/hub")) as f:
conf = yaml.safe_load(f)
token = conf['github.com'][0]['oauth_token']
@@ -32,15 +33,15 @@
assets = s.get("https://api.github.com/repos/buildbot/buildbot/releases/{id}/assets".format(id=r['id']))
assets.raise_for_status()
assets = assets.json()
- os.system("mkdir -p dist")
+ os.makedirs('dist', exist_ok=True)
for url in (a['browser_download_url'] for a in assets):
if url.endswith(".whl") or url.endswith(".tar.gz"):
fn = os.path.join('dist', url.split('/')[-1])
- download(url, fn)
+ download(s, url, fn)
# download tag archive
url = "https://github.com/buildbot/buildbot/archive/{tag}.tar.gz".format(tag=tag)
fn = os.path.join('dist', "buildbot-{tag}.gitarchive.tar.gz".format(tag=tag))
- download(url, fn)
+ download(s, url, fn)
sigfn = fn + ".sig"
if os.path.exists(sigfn):
os.unlink(sigfn)
| {"golden_diff": "diff --git a/common/download_release.py b/common/download_release.py\n--- a/common/download_release.py\n+++ b/common/download_release.py\n@@ -6,18 +6,19 @@\n import yaml\n \n \n-def download(url, fn):\n- print(url, fn)\n+def download(session, url, fn):\n if os.path.exists(fn):\n- return\n+ print('Removing old file {}'.format(fn))\n+ os.unlink(fn)\n+ print('Downloading {} from {}'.format(fn, url))\n with open(fn, 'wb') as f:\n- r = s.get(url, stream=True)\n+ r = session.get(url, stream=True)\n+ r.raise_for_status()\n for c in r.iter_content(1024):\n f.write(c)\n \n \n def main():\n- global s\n with open(os.path.expanduser(\"~/.config/hub\")) as f:\n conf = yaml.safe_load(f)\n token = conf['github.com'][0]['oauth_token']\n@@ -32,15 +33,15 @@\n assets = s.get(\"https://api.github.com/repos/buildbot/buildbot/releases/{id}/assets\".format(id=r['id']))\n assets.raise_for_status()\n assets = assets.json()\n- os.system(\"mkdir -p dist\")\n+ os.makedirs('dist', exist_ok=True)\n for url in (a['browser_download_url'] for a in assets):\n if url.endswith(\".whl\") or url.endswith(\".tar.gz\"):\n fn = os.path.join('dist', url.split('/')[-1])\n- download(url, fn)\n+ download(s, url, fn)\n # download tag archive\n url = \"https://github.com/buildbot/buildbot/archive/{tag}.tar.gz\".format(tag=tag)\n fn = os.path.join('dist', \"buildbot-{tag}.gitarchive.tar.gz\".format(tag=tag))\n- download(url, fn)\n+ download(s, url, fn)\n sigfn = fn + \".sig\"\n if os.path.exists(sigfn):\n os.unlink(sigfn)\n", "issue": "buildbot-2.6.0.gitarchive.tar.gz is not generated correctly\n```\r\n<rjarry> tardyp: the .gitarchive.tar.gz file of release 2.6.0 is corrupted\r\n16:57:31 <rjarry> buildbot-v2.5.1.gitarchive.tar.gz\r\n16:57:31 <rjarry> 4.69 MB\r\n16:57:31 <rjarry> buildbot-v2.5.1.gitarchive.tar.gz.sig\r\n16:57:31 <rjarry> 310 Bytes\r\n16:57:47 <rjarry> last one does not\r\n16:57:49 <rjarry> buildbot-2.6.0.gitarchive.tar.gz\r\n16:57:49 <rjarry> 15 Bytes\r\n16:57:49 <rjarry> buildbot-2.6.0.gitarchive.tar.gz.sig\r\n16:57:49 <rjarry> 310 Bytes\r\n```\r\nWe shall regenerate the archive and signature manually.\r\npreferably @p12tic , which originaly signed the rest of the release\r\n\r\nThis archive is used by debian packaging as they require to build everything from source\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\n\nimport requests\nimport yaml\n\n\ndef download(url, fn):\n print(url, fn)\n if os.path.exists(fn):\n return\n with open(fn, 'wb') as f:\n r = s.get(url, stream=True)\n for c in r.iter_content(1024):\n f.write(c)\n\n\ndef main():\n global s\n with open(os.path.expanduser(\"~/.config/hub\")) as f:\n conf = yaml.safe_load(f)\n token = conf['github.com'][0]['oauth_token']\n\n s = requests.Session()\n s.headers.update({'Authorization': 'token ' + token})\n r = s.get(\"https://api.github.com/repos/buildbot/buildbot/releases/latest\")\n r.raise_for_status()\n r = r.json()\n tag = r['name']\n upload_url = r['upload_url'].split('{')[0]\n assets = s.get(\"https://api.github.com/repos/buildbot/buildbot/releases/{id}/assets\".format(id=r['id']))\n assets.raise_for_status()\n assets = assets.json()\n os.system(\"mkdir -p dist\")\n for url in (a['browser_download_url'] for a in assets):\n if url.endswith(\".whl\") or url.endswith(\".tar.gz\"):\n fn = os.path.join('dist', url.split('/')[-1])\n download(url, fn)\n # download tag archive\n url = \"https://github.com/buildbot/buildbot/archive/{tag}.tar.gz\".format(tag=tag)\n fn = os.path.join('dist', 
\"buildbot-{tag}.gitarchive.tar.gz\".format(tag=tag))\n download(url, fn)\n sigfn = fn + \".sig\"\n if os.path.exists(sigfn):\n os.unlink(sigfn)\n # sign the tag archive for debian\n os.system(\"gpg --output {} -b {}\".format(sigfn, fn))\n sigfnbase = os.path.basename(sigfn)\n r = s.post(upload_url,\n headers={'Content-Type': \"application/pgp-signature\"},\n params={\"name\": sigfnbase},\n data=open(sigfn, 'rb'))\n print(r.content)\n fnbase = os.path.basename(fn)\n r = s.post(upload_url,\n headers={'Content-Type': \"application/gzip\"},\n params={\"name\": fnbase},\n data=open(fn, 'rb'))\n print(r.content)\n # remove files so that twine upload do not upload them\n os.unlink(sigfn)\n os.unlink(fn)\n\n\nif __name__ == '__main__':\n main()\n", "path": "common/download_release.py"}]} | 1,503 | 442 |
gh_patches_debug_23914 | rasdani/github-patches | git_diff | netbox-community__netbox-4849 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Swagger references IP address family incorrectly as string in NestedIPAddress.
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reproducible bugs. If you need assistance with
NetBox installation, or if you have a general question, DO NOT open an
issue. Instead, post to our mailing list:
https://groups.google.com/forum/#!forum/netbox-discuss
Please describe the environment in which you are running NetBox. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version: netbox-docker
* NetBox version: 2.8.6
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of NetBox. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynetbox.
-->
### Steps to Reproduce
1. Generate go-netbox with latest swagger https://github.com/netbox-community/go-netbox
2. Attempt to pull device information with dcim.DcimDevicesListParams on a device/rack that contains a primary IP configured on a device.
3. "cannot unmarshal number into Go struct field NestedIPAddress.results.primary_ip.family of type string"
<!-- What did you expect to happen? -->
### Expected Behavior
Swagger to match returned data.
<!-- What happened instead? -->
### Observed Behavior
The API returns the family as a number instead of a string, mismatching the swagger definition.
```
json: cannot unmarshal number into Go struct field NestedIPAddress.results.primary_ip.family of type string
```
</issue>
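A small sketch (host, token, and device data are placeholders) of how the type mismatch can be confirmed against a live instance before regenerating the Go client:

```python
import requests

resp = requests.get(
    "https://netbox.example.com/api/dcim/devices/",       # hypothetical host
    headers={"Authorization": "Token 0123456789abcdef"},  # hypothetical token
)
resp.raise_for_status()
for device in resp.json()["results"]:
    ip = device.get("primary_ip")
    if ip:
        # Swagger declares this as a string, but the server sends an integer.
        print(type(ip["family"]), ip["family"])
```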
<code>
[start of netbox/ipam/api/nested_serializers.py]
1 from rest_framework import serializers
2
3 from ipam import models
4 from utilities.api import WritableNestedSerializer
5
6 __all__ = [
7 'NestedAggregateSerializer',
8 'NestedIPAddressSerializer',
9 'NestedPrefixSerializer',
10 'NestedRIRSerializer',
11 'NestedRoleSerializer',
12 'NestedServiceSerializer',
13 'NestedVLANGroupSerializer',
14 'NestedVLANSerializer',
15 'NestedVRFSerializer',
16 ]
17
18
19 #
20 # VRFs
21 #
22
23 class NestedVRFSerializer(WritableNestedSerializer):
24 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:vrf-detail')
25 prefix_count = serializers.IntegerField(read_only=True)
26
27 class Meta:
28 model = models.VRF
29 fields = ['id', 'url', 'name', 'rd', 'prefix_count']
30
31
32 #
33 # RIRs/aggregates
34 #
35
36 class NestedRIRSerializer(WritableNestedSerializer):
37 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:rir-detail')
38 aggregate_count = serializers.IntegerField(read_only=True)
39
40 class Meta:
41 model = models.RIR
42 fields = ['id', 'url', 'name', 'slug', 'aggregate_count']
43
44
45 class NestedAggregateSerializer(WritableNestedSerializer):
46 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:aggregate-detail')
47
48 class Meta:
49 model = models.Aggregate
50 fields = ['id', 'url', 'family', 'prefix']
51
52
53 #
54 # VLANs
55 #
56
57 class NestedRoleSerializer(WritableNestedSerializer):
58 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:role-detail')
59 prefix_count = serializers.IntegerField(read_only=True)
60 vlan_count = serializers.IntegerField(read_only=True)
61
62 class Meta:
63 model = models.Role
64 fields = ['id', 'url', 'name', 'slug', 'prefix_count', 'vlan_count']
65
66
67 class NestedVLANGroupSerializer(WritableNestedSerializer):
68 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:vlangroup-detail')
69 vlan_count = serializers.IntegerField(read_only=True)
70
71 class Meta:
72 model = models.VLANGroup
73 fields = ['id', 'url', 'name', 'slug', 'vlan_count']
74
75
76 class NestedVLANSerializer(WritableNestedSerializer):
77 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:vlan-detail')
78
79 class Meta:
80 model = models.VLAN
81 fields = ['id', 'url', 'vid', 'name', 'display_name']
82
83
84 #
85 # Prefixes
86 #
87
88 class NestedPrefixSerializer(WritableNestedSerializer):
89 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:prefix-detail')
90
91 class Meta:
92 model = models.Prefix
93 fields = ['id', 'url', 'family', 'prefix']
94
95
96 #
97 # IP addresses
98 #
99
100 class NestedIPAddressSerializer(WritableNestedSerializer):
101 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:ipaddress-detail')
102
103 class Meta:
104 model = models.IPAddress
105 fields = ['id', 'url', 'family', 'address']
106
107
108 #
109 # Services
110 #
111
112 class NestedServiceSerializer(WritableNestedSerializer):
113 url = serializers.HyperlinkedIdentityField(view_name='ipam-api:service-detail')
114
115 class Meta:
116 model = models.Service
117 fields = ['id', 'url', 'name', 'protocol', 'port']
118
[end of netbox/ipam/api/nested_serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/ipam/api/nested_serializers.py b/netbox/ipam/api/nested_serializers.py
--- a/netbox/ipam/api/nested_serializers.py
+++ b/netbox/ipam/api/nested_serializers.py
@@ -44,6 +44,7 @@
class NestedAggregateSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='ipam-api:aggregate-detail')
+ family = serializers.IntegerField(read_only=True)
class Meta:
model = models.Aggregate
@@ -87,6 +88,7 @@
class NestedPrefixSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='ipam-api:prefix-detail')
+ family = serializers.IntegerField(read_only=True)
class Meta:
model = models.Prefix
@@ -99,6 +101,7 @@
class NestedIPAddressSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name='ipam-api:ipaddress-detail')
+ family = serializers.IntegerField(read_only=True)
class Meta:
model = models.IPAddress
| {"golden_diff": "diff --git a/netbox/ipam/api/nested_serializers.py b/netbox/ipam/api/nested_serializers.py\n--- a/netbox/ipam/api/nested_serializers.py\n+++ b/netbox/ipam/api/nested_serializers.py\n@@ -44,6 +44,7 @@\n \n class NestedAggregateSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:aggregate-detail')\n+ family = serializers.IntegerField(read_only=True)\n \n class Meta:\n model = models.Aggregate\n@@ -87,6 +88,7 @@\n \n class NestedPrefixSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:prefix-detail')\n+ family = serializers.IntegerField(read_only=True)\n \n class Meta:\n model = models.Prefix\n@@ -99,6 +101,7 @@\n \n class NestedIPAddressSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:ipaddress-detail')\n+ family = serializers.IntegerField(read_only=True)\n \n class Meta:\n model = models.IPAddress\n", "issue": "Swagger references IP address family incorrectly as string in NestedIPAddress.\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reproducible bugs. If you need assistance with\r\n NetBox installation, or if you have a general question, DO NOT open an\r\n issue. Instead, post to our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please describe the environment in which you are running NetBox. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: netbox-docker\r\n* NetBox version: 2.8.6\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of NetBox. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynetbox.\r\n-->\r\n### Steps to Reproduce\r\n1. Generate go-netbox with latest swagger https://github.com/netbox-community/go-netbox\r\n2. Attempt to pull device information with dcim.DcimDevicesListParams on a device / rack that contains a primary ip configured on an device.\r\n3. \"cannot unmarshal number into Go struct field NestedIPAddress.results.primary_ip.family of type string\"\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nSwagger to match returned data.\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nApi returns Family as an number instead of a string, mismatching with swagger.\r\n```\r\njson: cannot unmarshal number into Go struct field NestedIPAddress.results.primary_ip.family of type string\r\n```\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom ipam import models\nfrom utilities.api import WritableNestedSerializer\n\n__all__ = [\n 'NestedAggregateSerializer',\n 'NestedIPAddressSerializer',\n 'NestedPrefixSerializer',\n 'NestedRIRSerializer',\n 'NestedRoleSerializer',\n 'NestedServiceSerializer',\n 'NestedVLANGroupSerializer',\n 'NestedVLANSerializer',\n 'NestedVRFSerializer',\n]\n\n\n#\n# VRFs\n#\n\nclass NestedVRFSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:vrf-detail')\n prefix_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = models.VRF\n fields = ['id', 'url', 'name', 'rd', 'prefix_count']\n\n\n#\n# RIRs/aggregates\n#\n\nclass NestedRIRSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:rir-detail')\n aggregate_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = models.RIR\n fields = ['id', 'url', 'name', 'slug', 'aggregate_count']\n\n\nclass NestedAggregateSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:aggregate-detail')\n\n class Meta:\n model = models.Aggregate\n fields = ['id', 'url', 'family', 'prefix']\n\n\n#\n# VLANs\n#\n\nclass NestedRoleSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:role-detail')\n prefix_count = serializers.IntegerField(read_only=True)\n vlan_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = models.Role\n fields = ['id', 'url', 'name', 'slug', 'prefix_count', 'vlan_count']\n\n\nclass NestedVLANGroupSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:vlangroup-detail')\n vlan_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = models.VLANGroup\n fields = ['id', 'url', 'name', 'slug', 'vlan_count']\n\n\nclass NestedVLANSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:vlan-detail')\n\n class Meta:\n model = models.VLAN\n fields = ['id', 'url', 'vid', 'name', 'display_name']\n\n\n#\n# Prefixes\n#\n\nclass NestedPrefixSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:prefix-detail')\n\n class Meta:\n model = models.Prefix\n fields = ['id', 'url', 'family', 'prefix']\n\n\n#\n# IP addresses\n#\n\nclass NestedIPAddressSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:ipaddress-detail')\n\n class Meta:\n model = models.IPAddress\n fields = ['id', 'url', 'family', 'address']\n\n\n#\n# Services\n#\n\nclass NestedServiceSerializer(WritableNestedSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='ipam-api:service-detail')\n\n class Meta:\n model = models.Service\n fields = ['id', 'url', 'name', 'protocol', 'port']\n", "path": "netbox/ipam/api/nested_serializers.py"}]} | 1,923 | 244 |
gh_patches_debug_25784 | rasdani/github-patches | git_diff | google__openhtf-185 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Start Test Prompt is timing out and raising PromptUnansweredError
Start Test Prompt appears to be using the same timeout as user prompts. When using this in loop mode, we shouldn't have a timeout.
</issue>
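One plausible shape of the fix, sketched before the code listing: thread an explicit, very long timeout through to the prompt (assuming `DisplayPrompt` accepts a `timeout_s` argument, which the accepted patch for this issue suggests it does). The day-long default is illustrative.

```python
from openhtf.io import user_input


def PromptForTestStart(message='Provide a DUT ID in order to start the test.',
                       text_input=True, timeout_s=60 * 60 * 24):
    """Start trigger that waits up to timeout_s (a day here) for input."""
    def trigger():
        prompt_manager = user_input.get_prompt_manager()
        return prompt_manager.DisplayPrompt(
            message, text_input=text_input, timeout_s=timeout_s)
    return trigger
```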
<code>
[start of openhtf/exe/triggers.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Module for handling the triggering of test start/stop.
16
17 In order for the TestExecutor (see exe/__init__.py) to know when to start a
18 test, it needs a way to know when a DUT has been connected. Also, the test
19 can't restart until the DUT is removed and re-appears. The serial for the
20 TestRun can be read from the DUT, or from the frontend.
21
22 This module provides some built-in triggers. Custom implementations of test
23 start and stop triggers must follow the following interface:
24
25 TestStart:
26 Args:
27 None
28 Returns:
29 DUT identifier, or None if it is not known at test start time.
30
31 TestStop:
32 Args:
33 dut_id: DUT identifier of the test that is stopping.
34 Returns:
35 Blocks until the test can re-start, then returns None.
36 """
37
38 import logging
39 import time
40
41 import gflags
42
43 from openhtf.io import user_input
44
45 gflags.DEFINE_string('dut_serial', 'UNKNOWN_DUT_ID',
46 'DUT serial to start the test with. '
47 'Only use if using the AutoStart trigger.')
48
49 FLAGS = gflags.FLAGS
50 _LOG = logging.getLogger(__name__)
51
52 def AutoStart(): # pylint: disable=invalid-name
53 """Start the test immediately with a dummy DUT ID."""
54 return FLAGS.dut_serial
55
56
57 def AutoStop(dummy_dut_id): # pylint: disable=invalid-name
58 """Stop the test immediately regardless of DUT ID given."""
59 pass
60
61
62 # pylint: disable=invalid-name
63 def PromptForTestStart(message='Provide a DUT ID in order to start the test.',
64 text_input=True):
65 """Make a test start trigger based on prompting the user for input."""
66 def trigger(): # pylint: disable=missing-docstring
67 prompt_manager = user_input.get_prompt_manager()
68 return prompt_manager.DisplayPrompt(message, text_input=text_input)
69 return trigger
70
71
72 def PromptForTestStop(message='Hit ENTER to complete the test.',
73 text_input=False):
74 """Make a test stop trigger based on prompting the user for a response."""
75 def trigger(dummy_dut_id): # pylint: disable=missing-docstring
76 prompt_manager = user_input.get_prompt_manager()
77 return prompt_manager.DisplayPrompt(message, text_input=text_input)
78 return trigger
79
[end of openhtf/exe/triggers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openhtf/exe/triggers.py b/openhtf/exe/triggers.py
--- a/openhtf/exe/triggers.py
+++ b/openhtf/exe/triggers.py
@@ -61,18 +61,20 @@
# pylint: disable=invalid-name
def PromptForTestStart(message='Provide a DUT ID in order to start the test.',
- text_input=True):
+ text_input=False, timeout_s=60*60*24):
"""Make a test start trigger based on prompting the user for input."""
def trigger(): # pylint: disable=missing-docstring
prompt_manager = user_input.get_prompt_manager()
- return prompt_manager.DisplayPrompt(message, text_input=text_input)
+ return prompt_manager.DisplayPrompt(
+ message, text_input=text_input, timeout_s=timeout_s)
return trigger
def PromptForTestStop(message='Hit ENTER to complete the test.',
- text_input=False):
+ text_input=False, timeout_s=60*60*24):
"""Make a test stop trigger based on prompting the user for a response."""
def trigger(dummy_dut_id): # pylint: disable=missing-docstring
prompt_manager = user_input.get_prompt_manager()
- return prompt_manager.DisplayPrompt(message, text_input=text_input)
+ return prompt_manager.DisplayPrompt(
+ message, text_input=text_input, timeout_s=timeout_s)
return trigger
| {"golden_diff": "diff --git a/openhtf/exe/triggers.py b/openhtf/exe/triggers.py\n--- a/openhtf/exe/triggers.py\n+++ b/openhtf/exe/triggers.py\n@@ -61,18 +61,20 @@\n \n # pylint: disable=invalid-name\n def PromptForTestStart(message='Provide a DUT ID in order to start the test.',\n- text_input=True):\n+ text_input=False, timeout_s=60*60*24):\n \"\"\"Make a test start trigger based on prompting the user for input.\"\"\"\n def trigger(): # pylint: disable=missing-docstring\n prompt_manager = user_input.get_prompt_manager()\n- return prompt_manager.DisplayPrompt(message, text_input=text_input)\n+ return prompt_manager.DisplayPrompt(\n+ message, text_input=text_input, timeout_s=timeout_s)\n return trigger\n \n \n def PromptForTestStop(message='Hit ENTER to complete the test.',\n- text_input=False):\n+ text_input=False, timeout_s=60*60*24):\n \"\"\"Make a test stop trigger based on prompting the user for a response.\"\"\"\n def trigger(dummy_dut_id): # pylint: disable=missing-docstring\n prompt_manager = user_input.get_prompt_manager()\n- return prompt_manager.DisplayPrompt(message, text_input=text_input)\n+ return prompt_manager.DisplayPrompt(\n+ message, text_input=text_input, timeout_s=timeout_s)\n return trigger\n", "issue": "Start Test Prompt is timing out and raising PromptUnansweredError\nStart Test Prompt appears to be using same timeout as user prompts. When using this in loop mode we shouldn't have a timeout\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for handling the triggering of test start/stop.\n\nIn order for the TestExecutor (see exe/__init__.py) to know when to start a\ntest, it needs a way to know when a DUT has been connected. Also, the test\ncan't restart until the DUT is removed and re-appears. The serial for the\nTestRun can be read from the DUT, or from the frontend.\n\nThis module provides some built-in triggers. Custom implementations of test\nstart and stop triggers must follow the following interface:\n\nTestStart:\n Args:\n None\n Returns:\n DUT identifier, or None if it is not known at test start time.\n\nTestStop:\n Args:\n dut_id: DUT identifier of the test that is stopping.\n Returns:\n Blocks until the test can re-start, then returns None.\n\"\"\"\n\nimport logging\nimport time\n\nimport gflags\n\nfrom openhtf.io import user_input\n\ngflags.DEFINE_string('dut_serial', 'UNKNOWN_DUT_ID',\n 'DUT serial to start the test with. 
'\n 'Only use if using the AutoStart trigger.')\n\nFLAGS = gflags.FLAGS\n_LOG = logging.getLogger(__name__)\n\ndef AutoStart(): # pylint: disable=invalid-name\n \"\"\"Start the test immediately with a dummy DUT ID.\"\"\"\n return FLAGS.dut_serial\n\n\ndef AutoStop(dummy_dut_id): # pylint: disable=invalid-name\n \"\"\"Stop the test immediately regardless of DUT ID given.\"\"\"\n pass\n\n\n# pylint: disable=invalid-name\ndef PromptForTestStart(message='Provide a DUT ID in order to start the test.',\n text_input=True):\n \"\"\"Make a test start trigger based on prompting the user for input.\"\"\"\n def trigger(): # pylint: disable=missing-docstring\n prompt_manager = user_input.get_prompt_manager()\n return prompt_manager.DisplayPrompt(message, text_input=text_input)\n return trigger\n\n\ndef PromptForTestStop(message='Hit ENTER to complete the test.',\n text_input=False):\n \"\"\"Make a test stop trigger based on prompting the user for a response.\"\"\"\n def trigger(dummy_dut_id): # pylint: disable=missing-docstring\n prompt_manager = user_input.get_prompt_manager()\n return prompt_manager.DisplayPrompt(message, text_input=text_input)\n return trigger\n", "path": "openhtf/exe/triggers.py"}]} | 1,368 | 315 |
gh_patches_debug_279 | rasdani/github-patches | git_diff | sanic-org__sanic-1292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New release on PyPI?
Hello,
I was looking for a tool to autoreload my code when I develop, and I found this commit: https://github.com/channelcat/sanic/commit/52c2a8484e6aa5fa13aaade49e1f2597dd006e15
So it seems Sanic has integrated it since December 07, 2017. But the latest version on PyPI dates from the day before (https://github.com/channelcat/sanic/commit/1ea3ab7fe8ab03a6ddf4d75a3de8cb719f4c584c): https://pypi.org/project/Sanic/#history
Is it possible to release a new version on PyPI, please? Other features (like the UUID support in routes) are also interesting :)
Thanks in advance!
</issue>
<code>
[start of sanic/__init__.py]
1 from sanic.app import Sanic
2 from sanic.blueprints import Blueprint
3
4 __version__ = '0.7.0'
5
6 __all__ = ['Sanic', 'Blueprint']
7
[end of sanic/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/__init__.py b/sanic/__init__.py
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -1,6 +1,6 @@
from sanic.app import Sanic
from sanic.blueprints import Blueprint
-__version__ = '0.7.0'
+__version__ = '0.8.0'
__all__ = ['Sanic', 'Blueprint']
| {"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -1,6 +1,6 @@\n from sanic.app import Sanic\n from sanic.blueprints import Blueprint\n \n-__version__ = '0.7.0'\n+__version__ = '0.8.0'\n \n __all__ = ['Sanic', 'Blueprint']\n", "issue": "New release on Pypi ?\nHello,\r\n\r\nI was looking for a tool to autoreload my code when I develop and I found this commit : https://github.com/channelcat/sanic/commit/52c2a8484e6aa5fa13aaade49e1f2597dd006e15\r\n\r\nSo it seems Sanic already integrates it since December 07, 2017. But the the latest version on Pypi dates from the day before (https://github.com/channelcat/sanic/commit/1ea3ab7fe8ab03a6ddf4d75a3de8cb719f4c584c) : https://pypi.org/project/Sanic/#history\r\n\r\nIs-it possible to release a new version on Pypi please ? Other features (like the UUID support in routes) are also interesting :)\r\n\r\nThanks in advance !\n", "before_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.7.0'\n\n__all__ = ['Sanic', 'Blueprint']\n", "path": "sanic/__init__.py"}]} | 782 | 99 |
gh_patches_debug_4424 | rasdani/github-patches | git_diff | mozilla__bugbug-2654 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace typing.Dict with dict
It is now possible to use `dict` directly instead of `typing.Dict` in type definitions.
</issue>
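A short before/after sketch of the change (PEP 585 generics, available from Python 3.9); the function itself is a made-up example, not bugbug code:

```python
# Before: needs an import purely for the annotation.
from typing import Dict

def count_words_old(text: str) -> Dict[str, int]:
    ...

# After: the builtin type is usable as a generic directly.
def count_words(text: str) -> dict[str, int]:
    counts: dict[str, int] = {}
    for word in text.split():
        counts[word] = counts.get(word, 0) + 1
    return counts
```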
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import os
7
8 from setuptools import find_packages, setup
9
10 here = os.path.dirname(__file__)
11
12
13 def read_requirements(file_):
14 with open(os.path.join(here, file_)) as f:
15 return sorted(list(set(line.split("#")[0].strip() for line in f)))
16
17
18 install_requires = read_requirements("requirements.txt")
19
20
21 with open(os.path.join(here, "VERSION")) as f:
22 version = f.read().strip()
23
24 # Read the extra requirements
25 extras = ["nlp", "nn"]
26
27 extras_require = {}
28
29 for extra in extras:
30 extras_require[extra] = read_requirements("extra-%s-requirements.txt" % extra)
31
32
33 setup(
34 name="bugbug",
35 version=version,
36 description="ML tools for Mozilla projects",
37 author="Marco Castelluccio",
38 author_email="[email protected]",
39 install_requires=install_requires,
40 extras_require=extras_require,
41 packages=find_packages(exclude=["contrib", "docs", "tests"]),
42 include_package_data=True,
43 license="MPL2",
44 entry_points={
45 "console_scripts": [
46 "bugbug-data-commits = scripts.commit_retriever:main",
47 "bugbug-data-bugzilla = scripts.bug_retriever:main",
48 "bugbug-data-test-scheduling-history = scripts.test_scheduling_history_retriever:main",
49 "bugbug-data-revisions = scripts.revision_retriever:main",
50 "bugbug-train = scripts.trainer:main",
51 "bugbug-train-similarity = scripts.similarity_trainer:main",
52 "bugbug-check = scripts.check:main",
53 "bugbug-microannotate-generate = scripts.microannotate_generator:main",
54 "bugbug-classify-commit = scripts.commit_classifier:main",
55 "bugbug-classify-bug = scripts.bug_classifier:main",
56 "bugbug-regressor-finder = scripts.regressor_finder:main",
57 "bugbug-retrieve-training-metrics = scripts.retrieve_training_metrics:main",
58 "bugbug-analyze-training-metrics = scripts.analyze_training_metrics:main",
59 "bugbug-check-all-metrics = scripts.check_all_metrics:main",
60 "bugbug-past-bugs-by-unit = scripts.past_bugs_by_unit:main",
61 "bugbug-testing-policy-stats = scripts.testing_policy_stats:main",
62 "bugbug-generate-landings-risk-report = scripts.generate_landings_risk_report:main",
63 "bugbug-shadow-scheduler-stats = scripts.shadow_scheduler_stats:main",
64 "bugbug-data-github = scripts.github_issue_retriever:main",
65 ]
66 },
67 classifiers=[
68 "Programming Language :: Python :: 3.7",
69 "Programming Language :: Python :: 3.8",
70 "Programming Language :: Python :: 3.9",
71 "Programming Language :: Python :: 3 :: Only",
72 "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
73 ],
74 )
75
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,8 +65,6 @@
]
},
classifiers=[
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,8 +65,6 @@\n ]\n },\n classifiers=[\n- \"Programming Language :: Python :: 3.7\",\n- \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n", "issue": "Replace typing.Dict with dict\nIt is now possible to use `dict` directly instead of `typing.Dict` in type definitions.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.dirname(__file__)\n\n\ndef read_requirements(file_):\n with open(os.path.join(here, file_)) as f:\n return sorted(list(set(line.split(\"#\")[0].strip() for line in f)))\n\n\ninstall_requires = read_requirements(\"requirements.txt\")\n\n\nwith open(os.path.join(here, \"VERSION\")) as f:\n version = f.read().strip()\n\n# Read the extra requirements\nextras = [\"nlp\", \"nn\"]\n\nextras_require = {}\n\nfor extra in extras:\n extras_require[extra] = read_requirements(\"extra-%s-requirements.txt\" % extra)\n\n\nsetup(\n name=\"bugbug\",\n version=version,\n description=\"ML tools for Mozilla projects\",\n author=\"Marco Castelluccio\",\n author_email=\"[email protected]\",\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests\"]),\n include_package_data=True,\n license=\"MPL2\",\n entry_points={\n \"console_scripts\": [\n \"bugbug-data-commits = scripts.commit_retriever:main\",\n \"bugbug-data-bugzilla = scripts.bug_retriever:main\",\n \"bugbug-data-test-scheduling-history = scripts.test_scheduling_history_retriever:main\",\n \"bugbug-data-revisions = scripts.revision_retriever:main\",\n \"bugbug-train = scripts.trainer:main\",\n \"bugbug-train-similarity = scripts.similarity_trainer:main\",\n \"bugbug-check = scripts.check:main\",\n \"bugbug-microannotate-generate = scripts.microannotate_generator:main\",\n \"bugbug-classify-commit = scripts.commit_classifier:main\",\n \"bugbug-classify-bug = scripts.bug_classifier:main\",\n \"bugbug-regressor-finder = scripts.regressor_finder:main\",\n \"bugbug-retrieve-training-metrics = scripts.retrieve_training_metrics:main\",\n \"bugbug-analyze-training-metrics = scripts.analyze_training_metrics:main\",\n \"bugbug-check-all-metrics = scripts.check_all_metrics:main\",\n \"bugbug-past-bugs-by-unit = scripts.past_bugs_by_unit:main\",\n \"bugbug-testing-policy-stats = scripts.testing_policy_stats:main\",\n \"bugbug-generate-landings-risk-report = scripts.generate_landings_risk_report:main\",\n \"bugbug-shadow-scheduler-stats = scripts.shadow_scheduler_stats:main\",\n \"bugbug-data-github = scripts.github_issue_retriever:main\",\n ]\n },\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)\",\n ],\n)\n", "path": "setup.py"}]} | 1,402 | 112 |
gh_patches_debug_24527 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-7570 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NoneType Issue
I dropped a YAML file into a new OU/SubOU and it's not working, though it works in other OUs just fine. Nothing was changed in the file, but I am still getting this error; I'm not sure why.
```
Traceback (most recent call last):
File "/root/.pyenv/versions/3.9.12/bin/custodian", line 8, in <module>
sys.exit(main())
File "/root/.pyenv/versions/3.9.12/lib/python3.9/site-packages/c7n/cli.py", line 363, in main
command(config)
File "/root/.pyenv/versions/3.9.12/lib/python3.9/site-packages/c7n/commands.py", line 219, in validate
structure.validate(data)
File "/root/.pyenv/versions/3.9.12/lib/python3.9/site-packages/c7n/structure.py", line 48, in validate
self.validate_policy(p)
File "/root/.pyenv/versions/3.9.12/lib/python3.9/site-packages/c7n/structure.py", line 78, in validate_policy
for a in p.get('actions', ()):
TypeError: 'NoneType' object is not iterable
```
</issue>
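A minimal reproduction sketch of the likely cause (the policy snippet is invented): a bare `actions:` key in YAML parses to `None`, and `dict.get` only falls back to its default when the key is *absent*, so the loop receives `None`.

```python
import yaml

p = yaml.safe_load("""
name: example
resource: aws.ec2
actions:
""")

print(p["actions"])           # None -- the key exists but has no value
print(p.get("actions", ()))   # still None; the default is not used

# The usual guard, which is also what the accepted fix applies:
for a in p.get("actions", ()) or []:
    print(a)  # never runs, and no TypeError is raised
```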
<code>
[start of c7n/structure.py]
1 # Copyright The Cloud Custodian Authors.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import json
5
6 from c7n.exceptions import PolicyValidationError
7
8
9 class StructureParser:
10 """Provide fast validation and inspection of a policy file.
11
12 Intent is to provide more humane validation for top level errors
13 instead of printing full schema as error message.
14 """
15 allowed_file_keys = {'vars', 'policies'}
16 required_policy_keys = {'name', 'resource'}
17 allowed_policy_keys = {'name', 'resource', 'title', 'description', 'mode',
18 'tags', 'max-resources', 'metadata', 'query',
19 'filters', 'actions', 'source', 'conditions',
20 # legacy keys subject to deprecation.
21 'region', 'start', 'end', 'tz', 'max-resources-percent',
22 'comments', 'comment'}
23
24 def validate(self, data):
25 if not isinstance(data, dict):
26 raise PolicyValidationError((
27 "Policy file top level data structure "
28 "should be a mapping/dict, instead found:%s") % (
29 type(data).__name__))
30 dkeys = set(data.keys())
31
32 extra = dkeys.difference(self.allowed_file_keys)
33 if extra:
34 raise PolicyValidationError((
35 'Policy files top level keys are %s, found extra: %s' % (
36 ', '.join(self.allowed_file_keys),
37 ', '.join(extra))))
38
39 if 'policies' not in data:
40 raise PolicyValidationError("`policies` list missing")
41
42 pdata = data.get('policies', [])
43 if not isinstance(pdata, list):
44 raise PolicyValidationError((
45 '`policies` key should be an array/list found: %s' % (
46 type(pdata).__name__)))
47 for p in pdata:
48 self.validate_policy(p)
49
50 def validate_policy(self, p):
51 if not isinstance(p, dict):
52 raise PolicyValidationError((
53 'policy must be a dictionary/mapping found:%s policy:\n %s' % (
54 type(p).__name__, json.dumps(p, indent=2))))
55 pkeys = set(p)
56 if self.required_policy_keys.difference(pkeys):
57 raise PolicyValidationError(
58 'policy missing required keys (name, resource) data:\n %s' % (
59 json.dumps(p, indent=2)))
60 if pkeys.difference(self.allowed_policy_keys):
61 raise PolicyValidationError(
62 'policy:%s has unknown keys: %s' % (
63 p['name'], ','.join(pkeys.difference(self.allowed_policy_keys))))
64 if not isinstance(p.get('filters', []), (list, type(None))):
65 raise PolicyValidationError((
66 'policy:%s must use a list for filters found:%s' % (
67 p['name'], type(p['filters']).__name__)))
68 element_types = (dict, str)
69 for f in p.get('filters', ()):
70 if not isinstance(f, element_types):
71 raise PolicyValidationError((
72 'policy:%s filter must be a mapping/dict found:%s' % (
73 p.get('name', 'unknown'), type(f).__name__)))
74 if not isinstance(p.get('actions', []), (list, type(None))):
75 raise PolicyValidationError((
76 'policy:%s must use a list for actions found:%s' % (
77 p.get('name', 'unknown'), type(p['actions']).__name__)))
78 for a in p.get('actions', ()):
79 if not isinstance(a, element_types):
80 raise PolicyValidationError((
81 'policy:%s action must be a mapping/dict found:%s' % (
82 p.get('name', 'unknown'), type(a).__name__)))
83
84 def get_resource_types(self, data):
85 resources = set()
86 for p in data.get('policies', []):
87 rtype = p['resource']
88 if '.' not in rtype:
89 rtype = 'aws.%s' % rtype
90 resources.add(rtype)
91 return resources
92
[end of c7n/structure.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/structure.py b/c7n/structure.py
--- a/c7n/structure.py
+++ b/c7n/structure.py
@@ -66,7 +66,7 @@
'policy:%s must use a list for filters found:%s' % (
p['name'], type(p['filters']).__name__)))
element_types = (dict, str)
- for f in p.get('filters', ()):
+ for f in p.get('filters', ()) or []:
if not isinstance(f, element_types):
raise PolicyValidationError((
'policy:%s filter must be a mapping/dict found:%s' % (
@@ -75,7 +75,7 @@
raise PolicyValidationError((
'policy:%s must use a list for actions found:%s' % (
p.get('name', 'unknown'), type(p['actions']).__name__)))
- for a in p.get('actions', ()):
+ for a in p.get('actions', ()) or []:
if not isinstance(a, element_types):
raise PolicyValidationError((
'policy:%s action must be a mapping/dict found:%s' % (
| {"golden_diff": "diff --git a/c7n/structure.py b/c7n/structure.py\n--- a/c7n/structure.py\n+++ b/c7n/structure.py\n@@ -66,7 +66,7 @@\n 'policy:%s must use a list for filters found:%s' % (\n p['name'], type(p['filters']).__name__)))\n element_types = (dict, str)\n- for f in p.get('filters', ()):\n+ for f in p.get('filters', ()) or []:\n if not isinstance(f, element_types):\n raise PolicyValidationError((\n 'policy:%s filter must be a mapping/dict found:%s' % (\n@@ -75,7 +75,7 @@\n raise PolicyValidationError((\n 'policy:%s must use a list for actions found:%s' % (\n p.get('name', 'unknown'), type(p['actions']).__name__)))\n- for a in p.get('actions', ()):\n+ for a in p.get('actions', ()) or []:\n if not isinstance(a, element_types):\n raise PolicyValidationError((\n 'policy:%s action must be a mapping/dict found:%s' % (\n", "issue": "NoneType Issue\nI dropped a yaml file into a new OU/SubOU and its not working, though it works in other OUs just fine. Nothing was changed in the file but I am still getting this error, not sure why.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/root/.pyenv/versions/3.9.12/bin/custodian\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/root/.pyenv/versions/3.9.12/lib/python3.9/site-packages/c7n/cli.py\", line 363, in main\r\n command(config)\r\n File \"/root/.pyenv/versions/3.9.12/lib/python3.9/site-packages/c7n/commands.py\", line 219, in validate\r\n structure.validate(data)\r\n File \"/root/.pyenv/versions/3.9.12/lib/python3.9/site-packages/c7n/structure.py\", line 48, in validate\r\n self.validate_policy(p)\r\n File \"/root/.pyenv/versions/3.9.12/lib/python3.9/site-packages/c7n/structure.py\", line 78, in validate_policy\r\n for a in p.get('actions', ()):\r\nTypeError: 'NoneType' object is not iterable\r\n```\n", "before_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport json\n\nfrom c7n.exceptions import PolicyValidationError\n\n\nclass StructureParser:\n \"\"\"Provide fast validation and inspection of a policy file.\n\n Intent is to provide more humane validation for top level errors\n instead of printing full schema as error message.\n \"\"\"\n allowed_file_keys = {'vars', 'policies'}\n required_policy_keys = {'name', 'resource'}\n allowed_policy_keys = {'name', 'resource', 'title', 'description', 'mode',\n 'tags', 'max-resources', 'metadata', 'query',\n 'filters', 'actions', 'source', 'conditions',\n # legacy keys subject to deprecation.\n 'region', 'start', 'end', 'tz', 'max-resources-percent',\n 'comments', 'comment'}\n\n def validate(self, data):\n if not isinstance(data, dict):\n raise PolicyValidationError((\n \"Policy file top level data structure \"\n \"should be a mapping/dict, instead found:%s\") % (\n type(data).__name__))\n dkeys = set(data.keys())\n\n extra = dkeys.difference(self.allowed_file_keys)\n if extra:\n raise PolicyValidationError((\n 'Policy files top level keys are %s, found extra: %s' % (\n ', '.join(self.allowed_file_keys),\n ', '.join(extra))))\n\n if 'policies' not in data:\n raise PolicyValidationError(\"`policies` list missing\")\n\n pdata = data.get('policies', [])\n if not isinstance(pdata, list):\n raise PolicyValidationError((\n '`policies` key should be an array/list found: %s' % (\n type(pdata).__name__)))\n for p in pdata:\n self.validate_policy(p)\n\n def validate_policy(self, p):\n if not isinstance(p, dict):\n raise PolicyValidationError((\n 'policy must be a dictionary/mapping found:%s policy:\\n %s' % (\n type(p).__name__, 
json.dumps(p, indent=2))))\n pkeys = set(p)\n if self.required_policy_keys.difference(pkeys):\n raise PolicyValidationError(\n 'policy missing required keys (name, resource) data:\\n %s' % (\n json.dumps(p, indent=2)))\n if pkeys.difference(self.allowed_policy_keys):\n raise PolicyValidationError(\n 'policy:%s has unknown keys: %s' % (\n p['name'], ','.join(pkeys.difference(self.allowed_policy_keys))))\n if not isinstance(p.get('filters', []), (list, type(None))):\n raise PolicyValidationError((\n 'policy:%s must use a list for filters found:%s' % (\n p['name'], type(p['filters']).__name__)))\n element_types = (dict, str)\n for f in p.get('filters', ()):\n if not isinstance(f, element_types):\n raise PolicyValidationError((\n 'policy:%s filter must be a mapping/dict found:%s' % (\n p.get('name', 'unknown'), type(f).__name__)))\n if not isinstance(p.get('actions', []), (list, type(None))):\n raise PolicyValidationError((\n 'policy:%s must use a list for actions found:%s' % (\n p.get('name', 'unknown'), type(p['actions']).__name__)))\n for a in p.get('actions', ()):\n if not isinstance(a, element_types):\n raise PolicyValidationError((\n 'policy:%s action must be a mapping/dict found:%s' % (\n p.get('name', 'unknown'), type(a).__name__)))\n\n def get_resource_types(self, data):\n resources = set()\n for p in data.get('policies', []):\n rtype = p['resource']\n if '.' not in rtype:\n rtype = 'aws.%s' % rtype\n resources.add(rtype)\n return resources\n", "path": "c7n/structure.py"}]} | 1,835 | 255 |
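
The `or []` guard in the golden diff above works around a `dict.get` subtlety: the default is returned only when the key is absent, not when the key is present with a `None` value, which is exactly what a bare `filters:` or `actions:` line in YAML parses to. A minimal sketch of the failure mode and the guard, independent of c7n (assumes PyYAML is installed; the policy text is a made-up example):

```python
import yaml

# A bare "actions:" key parses to None, not to a missing key.
policy = yaml.safe_load("""
name: example
resource: ec2
actions:
""")

print(policy.get("actions", ()))        # None: the key exists, so the () default is skipped
print(policy.get("actions", ()) or [])  # []: the guard restores an iterable

for a in policy.get("actions", ()) or []:  # safe: the loop body is simply skipped
    print(a)
```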
gh_patches_debug_7875 | rasdani/github-patches | git_diff | getsentry__sentry-python-875 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect parsing of complex urls in django
Sentry is parsing a complex URL as `/api/{version})/log` instead of `/api/{version}/log`.
<img width="207" alt="Screenshot 2020-10-17 at 10 40 47 AM" src="https://user-images.githubusercontent.com/4463796/96328987-70cb1c80-1066-11eb-94a4-ff8e15fb81ed.png">
</issue>
<code>
[start of sentry_sdk/integrations/django/transactions.py]
1 """
2 Copied from raven-python. Used for
3 `DjangoIntegration(transaction_fron="raven_legacy")`.
4 """
5
6 from __future__ import absolute_import
7
8 import re
9
10 from sentry_sdk._types import MYPY
11
12 if MYPY:
13 from django.urls.resolvers import URLResolver
14 from typing import Dict
15 from typing import List
16 from typing import Optional
17 from django.urls.resolvers import URLPattern
18 from typing import Tuple
19 from typing import Union
20 from re import Pattern
21
22 try:
23 from django.urls import get_resolver
24 except ImportError:
25 from django.core.urlresolvers import get_resolver
26
27
28 def get_regex(resolver_or_pattern):
29 # type: (Union[URLPattern, URLResolver]) -> Pattern[str]
30 """Utility method for django's deprecated resolver.regex"""
31 try:
32 regex = resolver_or_pattern.regex
33 except AttributeError:
34 regex = resolver_or_pattern.pattern.regex
35 return regex
36
37
38 class RavenResolver(object):
39 _optional_group_matcher = re.compile(r"\(\?\:([^\)]+)\)")
40 _named_group_matcher = re.compile(r"\(\?P<(\w+)>[^\)]+\)")
41 _non_named_group_matcher = re.compile(r"\([^\)]+\)")
42 # [foo|bar|baz]
43 _either_option_matcher = re.compile(r"\[([^\]]+)\|([^\]]+)\]")
44 _camel_re = re.compile(r"([A-Z]+)([a-z])")
45
46 _cache = {} # type: Dict[URLPattern, str]
47
48 def _simplify(self, pattern):
49 # type: (str) -> str
50 r"""
51 Clean up urlpattern regexes into something readable by humans:
52
53 From:
54 > "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
55
56 To:
57 > "{sport_slug}/athletes/{athlete_slug}/"
58 """
59 # remove optional params
60 # TODO(dcramer): it'd be nice to change these into [%s] but it currently
61 # conflicts with the other rules because we're doing regexp matches
62 # rather than parsing tokens
63 result = self._optional_group_matcher.sub(lambda m: "%s" % m.group(1), pattern)
64
65 # handle named groups first
66 result = self._named_group_matcher.sub(lambda m: "{%s}" % m.group(1), result)
67
68 # handle non-named groups
69 result = self._non_named_group_matcher.sub("{var}", result)
70
71 # handle optional params
72 result = self._either_option_matcher.sub(lambda m: m.group(1), result)
73
74 # clean up any outstanding regex-y characters.
75 result = (
76 result.replace("^", "")
77 .replace("$", "")
78 .replace("?", "")
79 .replace("//", "/")
80 .replace("\\", "")
81 )
82
83 return result
84
85 def _resolve(self, resolver, path, parents=None):
86 # type: (URLResolver, str, Optional[List[URLResolver]]) -> Optional[str]
87
88 match = get_regex(resolver).search(path) # Django < 2.0
89
90 if not match:
91 return None
92
93 if parents is None:
94 parents = [resolver]
95 elif resolver not in parents:
96 parents = parents + [resolver]
97
98 new_path = path[match.end() :]
99 for pattern in resolver.url_patterns:
100 # this is an include()
101 if not pattern.callback:
102 match_ = self._resolve(pattern, new_path, parents)
103 if match_:
104 return match_
105 continue
106 elif not get_regex(pattern).search(new_path):
107 continue
108
109 try:
110 return self._cache[pattern]
111 except KeyError:
112 pass
113
114 prefix = "".join(self._simplify(get_regex(p).pattern) for p in parents)
115 result = prefix + self._simplify(get_regex(pattern).pattern)
116 if not result.startswith("/"):
117 result = "/" + result
118 self._cache[pattern] = result
119 return result
120
121 return None
122
123 def resolve(
124 self,
125 path, # type: str
126 urlconf=None, # type: Union[None, Tuple[URLPattern, URLPattern, URLResolver], Tuple[URLPattern]]
127 ):
128 # type: (...) -> str
129 resolver = get_resolver(urlconf)
130 match = self._resolve(resolver, path)
131 return match or path
132
133
134 LEGACY_RESOLVER = RavenResolver()
135
[end of sentry_sdk/integrations/django/transactions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/django/transactions.py b/sentry_sdk/integrations/django/transactions.py
--- a/sentry_sdk/integrations/django/transactions.py
+++ b/sentry_sdk/integrations/django/transactions.py
@@ -37,7 +37,7 @@
class RavenResolver(object):
_optional_group_matcher = re.compile(r"\(\?\:([^\)]+)\)")
- _named_group_matcher = re.compile(r"\(\?P<(\w+)>[^\)]+\)")
+ _named_group_matcher = re.compile(r"\(\?P<(\w+)>[^\)]+\)+")
_non_named_group_matcher = re.compile(r"\([^\)]+\)")
# [foo|bar|baz]
_either_option_matcher = re.compile(r"\[([^\]]+)\|([^\]]+)\]")
| {"golden_diff": "diff --git a/sentry_sdk/integrations/django/transactions.py b/sentry_sdk/integrations/django/transactions.py\n--- a/sentry_sdk/integrations/django/transactions.py\n+++ b/sentry_sdk/integrations/django/transactions.py\n@@ -37,7 +37,7 @@\n \n class RavenResolver(object):\n _optional_group_matcher = re.compile(r\"\\(\\?\\:([^\\)]+)\\)\")\n- _named_group_matcher = re.compile(r\"\\(\\?P<(\\w+)>[^\\)]+\\)\")\n+ _named_group_matcher = re.compile(r\"\\(\\?P<(\\w+)>[^\\)]+\\)+\")\n _non_named_group_matcher = re.compile(r\"\\([^\\)]+\\)\")\n # [foo|bar|baz]\n _either_option_matcher = re.compile(r\"\\[([^\\]]+)\\|([^\\]]+)\\]\")\n", "issue": "Incorrect parsing of complex urls in django\nSentry is parsing a complex URL as `/api/{version})/log` instead of `/api/{version}/log`.\r\n\r\n<img width=\"207\" alt=\"Screenshot 2020-10-17 at 10 40 47 AM\" src=\"https://user-images.githubusercontent.com/4463796/96328987-70cb1c80-1066-11eb-94a4-ff8e15fb81ed.png\">\r\n\n", "before_files": [{"content": "\"\"\"\nCopied from raven-python. Used for\n`DjangoIntegration(transaction_fron=\"raven_legacy\")`.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from django.urls.resolvers import URLResolver\n from typing import Dict\n from typing import List\n from typing import Optional\n from django.urls.resolvers import URLPattern\n from typing import Tuple\n from typing import Union\n from re import Pattern\n\ntry:\n from django.urls import get_resolver\nexcept ImportError:\n from django.core.urlresolvers import get_resolver\n\n\ndef get_regex(resolver_or_pattern):\n # type: (Union[URLPattern, URLResolver]) -> Pattern[str]\n \"\"\"Utility method for django's deprecated resolver.regex\"\"\"\n try:\n regex = resolver_or_pattern.regex\n except AttributeError:\n regex = resolver_or_pattern.pattern.regex\n return regex\n\n\nclass RavenResolver(object):\n _optional_group_matcher = re.compile(r\"\\(\\?\\:([^\\)]+)\\)\")\n _named_group_matcher = re.compile(r\"\\(\\?P<(\\w+)>[^\\)]+\\)\")\n _non_named_group_matcher = re.compile(r\"\\([^\\)]+\\)\")\n # [foo|bar|baz]\n _either_option_matcher = re.compile(r\"\\[([^\\]]+)\\|([^\\]]+)\\]\")\n _camel_re = re.compile(r\"([A-Z]+)([a-z])\")\n\n _cache = {} # type: Dict[URLPattern, str]\n\n def _simplify(self, pattern):\n # type: (str) -> str\n r\"\"\"\n Clean up urlpattern regexes into something readable by humans:\n\n From:\n > \"^(?P<sport_slug>\\w+)/athletes/(?P<athlete_slug>\\w+)/$\"\n\n To:\n > \"{sport_slug}/athletes/{athlete_slug}/\"\n \"\"\"\n # remove optional params\n # TODO(dcramer): it'd be nice to change these into [%s] but it currently\n # conflicts with the other rules because we're doing regexp matches\n # rather than parsing tokens\n result = self._optional_group_matcher.sub(lambda m: \"%s\" % m.group(1), pattern)\n\n # handle named groups first\n result = self._named_group_matcher.sub(lambda m: \"{%s}\" % m.group(1), result)\n\n # handle non-named groups\n result = self._non_named_group_matcher.sub(\"{var}\", result)\n\n # handle optional params\n result = self._either_option_matcher.sub(lambda m: m.group(1), result)\n\n # clean up any outstanding regex-y characters.\n result = (\n result.replace(\"^\", \"\")\n .replace(\"$\", \"\")\n .replace(\"?\", \"\")\n .replace(\"//\", \"/\")\n .replace(\"\\\\\", \"\")\n )\n\n return result\n\n def _resolve(self, resolver, path, parents=None):\n # type: (URLResolver, str, Optional[List[URLResolver]]) -> Optional[str]\n\n match = 
get_regex(resolver).search(path) # Django < 2.0\n\n if not match:\n return None\n\n if parents is None:\n parents = [resolver]\n elif resolver not in parents:\n parents = parents + [resolver]\n\n new_path = path[match.end() :]\n for pattern in resolver.url_patterns:\n # this is an include()\n if not pattern.callback:\n match_ = self._resolve(pattern, new_path, parents)\n if match_:\n return match_\n continue\n elif not get_regex(pattern).search(new_path):\n continue\n\n try:\n return self._cache[pattern]\n except KeyError:\n pass\n\n prefix = \"\".join(self._simplify(get_regex(p).pattern) for p in parents)\n result = prefix + self._simplify(get_regex(pattern).pattern)\n if not result.startswith(\"/\"):\n result = \"/\" + result\n self._cache[pattern] = result\n return result\n\n return None\n\n def resolve(\n self,\n path, # type: str\n urlconf=None, # type: Union[None, Tuple[URLPattern, URLPattern, URLResolver], Tuple[URLPattern]]\n ):\n # type: (...) -> str\n resolver = get_resolver(urlconf)\n match = self._resolve(resolver, path)\n return match or path\n\n\nLEGACY_RESOLVER = RavenResolver()\n", "path": "sentry_sdk/integrations/django/transactions.py"}]} | 1,948 | 193 |
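
The entire fix is the trailing `+` on the named-group matcher. The old pattern `\(\?P<(\w+)>[^\)]+\)` stops at the first `)`, so a named group that itself contains a nested group leaves its own closing parenthesis behind, which is where the stray `)` in `/api/{version})/log` comes from. A hedged reproduction (the URL pattern is illustrative, not taken from the reporter's project):

```python
import re

old = re.compile(r"\(\?P<(\w+)>[^\)]+\)")   # matcher before the fix
new = re.compile(r"\(\?P<(\w+)>[^\)]+\)+")  # fixed: also consume nested closers

pattern = r"^api/(?P<version>v(1|2))/log$"  # hypothetical urlpattern with a nested group

print(old.sub(lambda m: "{%s}" % m.group(1), pattern))  # ^api/{version})/log$
print(new.sub(lambda m: "{%s}" % m.group(1), pattern))  # ^api/{version}/log$
```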
gh_patches_debug_5874 | rasdani/github-patches | git_diff | python-poetry__poetry-1862 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document the --no-root option
<!--
Hi there! Thank you for wanting to make Poetry better.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] I have searched the [documentation](https://python-poetry.org/docs/) and believe that my question is not covered.
## Feature Request
<!-- Now feel free to write your idea for improvement. Thanks again 🙌 ❤️ -->
The `--no-root` option described in https://github.com/python-poetry/poetry/issues/1525 works fine for installation. Unfortunately I found it only when looking for duplicate issues before raising this. `poetry help install` does not describe that option.
Please add it to the `help install` output.
</issue>
<code>
[start of poetry/console/commands/install.py]
1 from cleo import option
2
3 from .env_command import EnvCommand
4
5
6 class InstallCommand(EnvCommand):
7
8 name = "install"
9 description = "Installs the project dependencies."
10
11 options = [
12 option("no-dev", None, "Do not install the development dependencies."),
13 option(
14 "no-root", None, "Do not install the root package (the current project)."
15 ),
16 option(
17 "dry-run",
18 None,
19 "Output the operations but do not execute anything "
20 "(implicitly enables --verbose).",
21 ),
22 option(
23 "extras",
24 "E",
25 "Extra sets of dependencies to install.",
26 flag=False,
27 multiple=True,
28 ),
29 ]
30
31 help = """The <info>install</info> command reads the <comment>poetry.lock</> file from
32 the current directory, processes it, and downloads and installs all the
33 libraries and dependencies outlined in that file. If the file does not
34 exist it will look for <comment>pyproject.toml</> and do the same.
35
36 <info>poetry install</info>
37 """
38
39 _loggers = ["poetry.repositories.pypi_repository"]
40
41 def handle(self):
42 from clikit.io import NullIO
43 from poetry.installation.installer import Installer
44 from poetry.masonry.builders import EditableBuilder
45 from poetry.masonry.utils.module import ModuleOrPackageNotFound
46
47 installer = Installer(
48 self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool
49 )
50
51 extras = []
52 for extra in self.option("extras"):
53 if " " in extra:
54 extras += [e.strip() for e in extra.split(" ")]
55 else:
56 extras.append(extra)
57
58 installer.extras(extras)
59 installer.dev_mode(not self.option("no-dev"))
60 installer.dry_run(self.option("dry-run"))
61 installer.verbose(self.option("verbose"))
62
63 return_code = installer.run()
64
65 if return_code != 0:
66 return return_code
67
68 if self.option("no-root"):
69 return 0
70
71 try:
72 builder = EditableBuilder(self.poetry, self._env, NullIO())
73 except ModuleOrPackageNotFound:
74 # This is likely due to the fact that the project is an application
75 # not following the structure expected by Poetry
76 # If this is a true error it will be picked up later by build anyway.
77 return 0
78
79 self.line(
80 " - Installing <c1>{}</c1> (<b>{}</b>)".format(
81 self.poetry.package.pretty_name, self.poetry.package.pretty_version
82 )
83 )
84
85 if self.option("dry-run"):
86 return 0
87
88 builder.build()
89
90 return 0
91
[end of poetry/console/commands/install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py
--- a/poetry/console/commands/install.py
+++ b/poetry/console/commands/install.py
@@ -34,6 +34,12 @@
exist it will look for <comment>pyproject.toml</> and do the same.
<info>poetry install</info>
+
+By default, the above command will also install the current project. To install only the
+dependencies and not including the current project, run the command with the
+<info>--no-root</info> option like below:
+
+<info> poetry install --no-root</info>
"""
_loggers = ["poetry.repositories.pypi_repository"]
| {"golden_diff": "diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py\n--- a/poetry/console/commands/install.py\n+++ b/poetry/console/commands/install.py\n@@ -34,6 +34,12 @@\n exist it will look for <comment>pyproject.toml</> and do the same.\n \n <info>poetry install</info>\n+\n+By default, the above command will also install the current project. To install only the\n+dependencies and not including the current project, run the command with the\n+<info>--no-root</info> option like below:\n+\n+<info> poetry install --no-root</info>\n \"\"\"\n \n _loggers = [\"poetry.repositories.pypi_repository\"]\n", "issue": "Document the --no-root option\n<!--\r\n Hi there! Thank you for wanting to make Poetry better.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] I have searched the [documentation](https://python-poetry.org/docs/) and believe that my question is not covered.\r\n\r\n## Feature Request\r\n<!-- Now feel free to write your idea for improvement. Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\nThe `--no-root` option described in https://github.com/python-poetry/poetry/issues/1525 works fine for installation. Unfortunately I found it only when looking for duplicate issues before raising this. `poetry help install` does not describe that option.\r\n\r\nPlease add it to the `help install` output.\nDocument the --no-root option\n<!--\r\n Hi there! Thank you for wanting to make Poetry better.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] I have searched the [documentation](https://python-poetry.org/docs/) and believe that my question is not covered.\r\n\r\n## Feature Request\r\n<!-- Now feel free to write your idea for improvement. Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\nThe `--no-root` option described in https://github.com/python-poetry/poetry/issues/1525 works fine for installation. Unfortunately I found it only when looking for duplicate issues before raising this. 
`poetry help install` does not describe that option.\r\n\r\nPlease add it to the `help install` output.\n", "before_files": [{"content": "from cleo import option\n\nfrom .env_command import EnvCommand\n\n\nclass InstallCommand(EnvCommand):\n\n name = \"install\"\n description = \"Installs the project dependencies.\"\n\n options = [\n option(\"no-dev\", None, \"Do not install the development dependencies.\"),\n option(\n \"no-root\", None, \"Do not install the root package (the current project).\"\n ),\n option(\n \"dry-run\",\n None,\n \"Output the operations but do not execute anything \"\n \"(implicitly enables --verbose).\",\n ),\n option(\n \"extras\",\n \"E\",\n \"Extra sets of dependencies to install.\",\n flag=False,\n multiple=True,\n ),\n ]\n\n help = \"\"\"The <info>install</info> command reads the <comment>poetry.lock</> file from\nthe current directory, processes it, and downloads and installs all the\nlibraries and dependencies outlined in that file. If the file does not\nexist it will look for <comment>pyproject.toml</> and do the same.\n\n<info>poetry install</info>\n\"\"\"\n\n _loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self):\n from clikit.io import NullIO\n from poetry.installation.installer import Installer\n from poetry.masonry.builders import EditableBuilder\n from poetry.masonry.utils.module import ModuleOrPackageNotFound\n\n installer = Installer(\n self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool\n )\n\n extras = []\n for extra in self.option(\"extras\"):\n if \" \" in extra:\n extras += [e.strip() for e in extra.split(\" \")]\n else:\n extras.append(extra)\n\n installer.extras(extras)\n installer.dev_mode(not self.option(\"no-dev\"))\n installer.dry_run(self.option(\"dry-run\"))\n installer.verbose(self.option(\"verbose\"))\n\n return_code = installer.run()\n\n if return_code != 0:\n return return_code\n\n if self.option(\"no-root\"):\n return 0\n\n try:\n builder = EditableBuilder(self.poetry, self._env, NullIO())\n except ModuleOrPackageNotFound:\n # This is likely due to the fact that the project is an application\n # not following the structure expected by Poetry\n # If this is a true error it will be picked up later by build anyway.\n return 0\n\n self.line(\n \" - Installing <c1>{}</c1> (<b>{}</b>)\".format(\n self.poetry.package.pretty_name, self.poetry.package.pretty_version\n )\n )\n\n if self.option(\"dry-run\"):\n return 0\n\n builder.build()\n\n return 0\n", "path": "poetry/console/commands/install.py"}]} | 1,768 | 163 |
gh_patches_debug_1187 | rasdani/github-patches | git_diff | freedomofpress__securedrop-6051 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Alembic operations fail with multiple head revisions
## Description
All Alembic operations fail with Alembic error:
ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads
Cf. consistent recent failures of CI jobs `app-tests` and `staging-test-with-rebase` since #5974.
## Steps to Reproduce
`make test` on `develop`; open or push to a PR; etc.
## Expected Behavior
Alembic operations succeed and Alembic-based tests pass.
## Actual Behavior
All Alembic operations and tests fail with Alembic error:
ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads
## Comments
This is essentially an Alembic-level merge-conflict. PR forthcoming with the one-line fix.
</issue>
<code>
[start of securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py]
1 """unique_index_for_instanceconfig_valid_until
2
3 Revision ID: 1ddb81fb88c2
4 Revises: 92fba0be98e9
5 Create Date: 2021-06-04 17:28:25.725563
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = '1ddb81fb88c2'
14 down_revision = '92fba0be98e9'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 with op.batch_alter_table('instance_config', schema=None) as batch_op:
22 batch_op.create_index('ix_one_active_instance_config', [sa.text('valid_until IS NULL')], unique=True, sqlite_where=sa.text('valid_until IS NULL'))
23
24 # ### end Alembic commands ###
25
26
27 def downgrade():
28 # ### commands auto generated by Alembic - please adjust! ###
29 with op.batch_alter_table('instance_config', schema=None) as batch_op:
30 batch_op.drop_index('ix_one_active_instance_config')
31
32 # ### end Alembic commands ###
33
[end of securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py b/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py
--- a/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py
+++ b/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py
@@ -11,7 +11,7 @@
# revision identifiers, used by Alembic.
revision = '1ddb81fb88c2'
-down_revision = '92fba0be98e9'
+down_revision = 'b060f38c0c31'
branch_labels = None
depends_on = None
| {"golden_diff": "diff --git a/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py b/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py\n--- a/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py\n+++ b/securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py\n@@ -11,7 +11,7 @@\n \n # revision identifiers, used by Alembic.\n revision = '1ddb81fb88c2'\n-down_revision = '92fba0be98e9'\n+down_revision = 'b060f38c0c31'\n branch_labels = None\n depends_on = None\n", "issue": "Alembic operations fail with multiple head revisions\n## Description\r\n\r\nAll Alembic operations fail with Alembic error:\r\n\r\n ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads\r\n\r\nCf. consistent recent failures of CI jobs `app-tests` and `staging-test-with-rebase` since #5974.\r\n\r\n## Steps to Reproduce\r\n\r\n`make test` on `develop`; open or push to a PR; etc.\r\n\r\n## Expected Behavior\r\n\r\nAlembic operations succeed and Alembic-based tests pass.\r\n\r\n## Actual Behavior\r\n\r\nAll Alembic operations and tests fail with Alembic error:\r\n\r\n ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads\r\n\r\n## Comments\r\n\r\nThis is essentially an Alembic-level merge-conflict. PR forthcoming with the one-line fix.\n", "before_files": [{"content": "\"\"\"unique_index_for_instanceconfig_valid_until\n\nRevision ID: 1ddb81fb88c2\nRevises: 92fba0be98e9\nCreate Date: 2021-06-04 17:28:25.725563\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '1ddb81fb88c2'\ndown_revision = '92fba0be98e9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('instance_config', schema=None) as batch_op:\n batch_op.create_index('ix_one_active_instance_config', [sa.text('valid_until IS NULL')], unique=True, sqlite_where=sa.text('valid_until IS NULL'))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('instance_config', schema=None) as batch_op:\n batch_op.drop_index('ix_one_active_instance_config')\n\n # ### end Alembic commands ###\n", "path": "securedrop/alembic/versions/1ddb81fb88c2_unique_index_for_instanceconfig_valid_.py"}]} | 1,146 | 200 |
gh_patches_debug_14171 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When worker/master image creation failed, client should fail instead of trying to launch master.
</issue>
<code>
[start of elasticdl/client/client.py]
1 import argparse
2 import os
3 import inspect
4 import tempfile
5 import time
6 import getpass
7 import sys
8 from string import Template
9 import docker
10 import yaml
11 from kubernetes.client.apis import core_v1_api
12 from kubernetes import config
13
14
15 def _m_file_in_docker(model_file):
16 return "/model/" + os.path.basename(model_file)
17
18 def _build_docker_image(
19 m_file, image_name, image_base="elasticdl:dev",
20 repository=None
21 ):
22 DOCKER_TEMPLATE = """
23 FROM {}
24 COPY {} {}
25 """
26
27 with tempfile.NamedTemporaryFile(mode="w+", delete=False) as df:
28 df.write(DOCKER_TEMPLATE.format(image_base, m_file, _m_file_in_docker(m_file)))
29
30 client = docker.APIClient(base_url="unix://var/run/docker.sock")
31 print("===== Building Docker Image =====")
32 for line in client.build(
33 dockerfile=df.name, path=".", rm=True, tag=image_name, decode=True
34 ):
35 text = line.get("stream", None)
36 if text:
37 sys.stdout.write(text)
38 sys.stdout.flush()
39 print("===== Docker Image Built =====")
40 if repository != None:
41 for line in client.push(image_name, stream=True, decode=True):
42 print(line)
43
44 def _gen_master_def(image_name, model_file, job_name, argv):
45 master_yaml = """
46 apiVersion: v1
47 kind: Pod
48 metadata:
49 name: "elasticdl-master-{job_name}"
50 labels:
51 purpose: test-command
52 spec:
53 containers:
54 - name: "elasticdl-master-{job_name}"
55 image: "{image_name}"
56 command: ["python"]
57 args: [
58 "-m", "elasticdl.master.main",
59 "--job_name", "{job_name}",
60 "--worker_image", "{image_name}",
61 "--model_file", "{m_file}"
62 ]
63 imagePullPolicy: IfNotPresent
64 env:
65 - name: MY_POD_IP
66 valueFrom:
67 fieldRef:
68 fieldPath: status.podIP
69 restartPolicy: Never
70 """ .format(m_file=_m_file_in_docker(model_file), image_name=image_name, job_name=job_name)
71
72 master_def = yaml.safe_load(master_yaml)
73
74 # Build master arguments
75 master_def['spec']['containers'][0]['args'].extend(argv)
76 return master_def
77
78 def _submit(image_name, model_file, job_name, argv):
79 master_def = _gen_master_def(image_name, model_file, job_name, argv)
80 config.load_kube_config()
81 api = core_v1_api.CoreV1Api()
82 resp = api.create_namespaced_pod(body=master_def, namespace="default")
83 print("Master launched. status='%s'" % str(resp.status))
84
85 def main():
86 parser = argparse.ArgumentParser(description="ElasticDL Client")
87 # Rewrite model_file argument and pass all other arguments to master.
88 parser.add_argument("--model_file", help="Path to Model file", required=True)
89 parser.add_argument("--image_base", help="Base image containing elasticdl runtime environment.", required=True)
90 parser.add_argument("--repository", help="The repository to push docker image to.")
91 parser.add_argument("--job_name", help="ElasticDL job name", required=True)
92 args, argv = parser.parse_known_args()
93
94 job_name = args.job_name + "-" + str(int(round(time.time() * 1000)))
95 image_name = args.image_base + '_' + job_name
96 _build_docker_image(args.model_file, image_name, image_base=args.image_base,
97 repository=args.repository)
98 _submit(image_name, args.model_file, job_name, argv)
99
100
101 if __name__ == "__main__":
102 main()
103
[end of elasticdl/client/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/client/client.py b/elasticdl/client/client.py
--- a/elasticdl/client/client.py
+++ b/elasticdl/client/client.py
@@ -32,12 +32,14 @@
for line in client.build(
dockerfile=df.name, path=".", rm=True, tag=image_name, decode=True
):
+ if "error" in line:
+ raise RuntimeError("Docker image build failure: " % line["error"])
text = line.get("stream", None)
if text:
sys.stdout.write(text)
sys.stdout.flush()
print("===== Docker Image Built =====")
- if repository != None:
+ if repository is not None:
for line in client.push(image_name, stream=True, decode=True):
print(line)
| {"golden_diff": "diff --git a/elasticdl/client/client.py b/elasticdl/client/client.py\n--- a/elasticdl/client/client.py\n+++ b/elasticdl/client/client.py\n@@ -32,12 +32,14 @@\n for line in client.build(\n dockerfile=df.name, path=\".\", rm=True, tag=image_name, decode=True\n ):\n+ if \"error\" in line:\n+ raise RuntimeError(\"Docker image build failure: \" % line[\"error\"])\n text = line.get(\"stream\", None)\n if text:\n sys.stdout.write(text)\n sys.stdout.flush()\n print(\"===== Docker Image Built =====\")\n- if repository != None:\n+ if repository is not None:\n for line in client.push(image_name, stream=True, decode=True):\n print(line)\n", "issue": "When worker/master image creation failed, client should fail instead of trying to launch master.\n\n", "before_files": [{"content": "import argparse\nimport os\nimport inspect\nimport tempfile\nimport time\nimport getpass\nimport sys\nfrom string import Template\nimport docker\nimport yaml\nfrom kubernetes.client.apis import core_v1_api\nfrom kubernetes import config\n\n\ndef _m_file_in_docker(model_file):\n return \"/model/\" + os.path.basename(model_file)\n\ndef _build_docker_image(\n m_file, image_name, image_base=\"elasticdl:dev\",\n repository=None\n):\n DOCKER_TEMPLATE = \"\"\"\nFROM {}\nCOPY {} {}\n\"\"\"\n\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False) as df:\n df.write(DOCKER_TEMPLATE.format(image_base, m_file, _m_file_in_docker(m_file)))\n\n client = docker.APIClient(base_url=\"unix://var/run/docker.sock\")\n print(\"===== Building Docker Image =====\")\n for line in client.build(\n dockerfile=df.name, path=\".\", rm=True, tag=image_name, decode=True\n ):\n text = line.get(\"stream\", None)\n if text:\n sys.stdout.write(text)\n sys.stdout.flush()\n print(\"===== Docker Image Built =====\")\n if repository != None:\n for line in client.push(image_name, stream=True, decode=True):\n print(line)\n\ndef _gen_master_def(image_name, model_file, job_name, argv):\n master_yaml = \"\"\"\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"elasticdl-master-{job_name}\"\n labels:\n purpose: test-command\nspec:\n containers:\n - name: \"elasticdl-master-{job_name}\"\n image: \"{image_name}\"\n command: [\"python\"]\n args: [\n \"-m\", \"elasticdl.master.main\",\n \"--job_name\", \"{job_name}\",\n \"--worker_image\", \"{image_name}\",\n \"--model_file\", \"{m_file}\"\n ]\n imagePullPolicy: IfNotPresent \n env:\n - name: MY_POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n restartPolicy: Never\n\"\"\" .format(m_file=_m_file_in_docker(model_file), image_name=image_name, job_name=job_name)\n\n master_def = yaml.safe_load(master_yaml)\n\n # Build master arguments\n master_def['spec']['containers'][0]['args'].extend(argv)\n return master_def\n\ndef _submit(image_name, model_file, job_name, argv):\n master_def = _gen_master_def(image_name, model_file, job_name, argv)\n config.load_kube_config()\n api = core_v1_api.CoreV1Api()\n resp = api.create_namespaced_pod(body=master_def, namespace=\"default\")\n print(\"Master launched. 
status='%s'\" % str(resp.status))\n\ndef main():\n parser = argparse.ArgumentParser(description=\"ElasticDL Client\")\n # Rewrite model_file argument and pass all other arguments to master.\n parser.add_argument(\"--model_file\", help=\"Path to Model file\", required=True)\n parser.add_argument(\"--image_base\", help=\"Base image containing elasticdl runtime environment.\", required=True)\n parser.add_argument(\"--repository\", help=\"The repository to push docker image to.\")\n parser.add_argument(\"--job_name\", help=\"ElasticDL job name\", required=True)\n args, argv = parser.parse_known_args()\n\n job_name = args.job_name + \"-\" + str(int(round(time.time() * 1000)))\n image_name = args.image_base + '_' + job_name \n _build_docker_image(args.model_file, image_name, image_base=args.image_base,\n repository=args.repository)\n _submit(image_name, args.model_file, job_name, argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/client/client.py"}]} | 1,545 | 173 |
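
The patch relies on the shape of docker-py's low-level build stream: with `decode=True`, `APIClient.build` yields dicts that carry a `stream` key for normal output or an `error` key on failure, and the original loop silently dropped the latter before launching the master pod against an image that was never built. One wrinkle: the merged diff's message string omits its `%s` placeholder (`"Docker image build failure: " % line["error"]` would itself raise a formatting `TypeError`), so the sketch below uses the corrected form, and it runs against canned output rather than a live Docker daemon:

```python
import sys

# Decoded objects as yielded by docker.APIClient.build(..., decode=True).
build_output = [
    {"stream": "Step 1/2 : FROM elasticdl:dev\n"},
    {"error": "COPY failed: no such file or directory"},
]

def consume(lines):
    for line in lines:
        if "error" in line:
            raise RuntimeError("Docker image build failure: %s" % line["error"])
        text = line.get("stream")
        if text:
            sys.stdout.write(text)
            sys.stdout.flush()

try:
    consume(build_output)
except RuntimeError as e:
    print("build aborted:", e)  # the client now fails instead of submitting the master
```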
gh_patches_debug_9153 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2101 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bash_version example doesn't work with json format
Running `insights run -p examples/rules -f json` results in a traceback because the `bash_version` rule puts an `InstalledRpm` object into its response:
```
TypeError: Object of type 'InstalledRpm' is not JSON serializable
```
</issue>
<code>
[start of examples/rules/bash_version.py]
1 """
2 Bash Version
3 ============
4
5 This is a simple rule and can be run against the local host
6 using the following command::
7
8 $ insights-run -p examples.rules.bash_version
9
10 or from the examples/rules directory::
11
12 $ python sample_rules.py
13 """
14 from insights.core.plugins import make_pass, rule
15 from insights.parsers.installed_rpms import InstalledRpms
16
17 KEY = "BASH_VERSION"
18
19 CONTENT = "Bash RPM Version: {{ bash_version }}"
20
21
22 @rule(InstalledRpms)
23 def report(rpms):
24 bash_ver = rpms.get_max('bash')
25 return make_pass(KEY, bash_version=bash_ver)
26
[end of examples/rules/bash_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/rules/bash_version.py b/examples/rules/bash_version.py
--- a/examples/rules/bash_version.py
+++ b/examples/rules/bash_version.py
@@ -11,7 +11,7 @@
$ python sample_rules.py
"""
-from insights.core.plugins import make_pass, rule
+from insights.core.plugins import make_info, rule
from insights.parsers.installed_rpms import InstalledRpms
KEY = "BASH_VERSION"
@@ -21,5 +21,5 @@
@rule(InstalledRpms)
def report(rpms):
- bash_ver = rpms.get_max('bash')
- return make_pass(KEY, bash_version=bash_ver)
+ bash = rpms.get_max('bash')
+ return make_info(KEY, bash_version=bash.nvra)
| {"golden_diff": "diff --git a/examples/rules/bash_version.py b/examples/rules/bash_version.py\n--- a/examples/rules/bash_version.py\n+++ b/examples/rules/bash_version.py\n@@ -11,7 +11,7 @@\n \n $ python sample_rules.py\n \"\"\"\n-from insights.core.plugins import make_pass, rule\n+from insights.core.plugins import make_info, rule\n from insights.parsers.installed_rpms import InstalledRpms\n \n KEY = \"BASH_VERSION\"\n@@ -21,5 +21,5 @@\n \n @rule(InstalledRpms)\n def report(rpms):\n- bash_ver = rpms.get_max('bash')\n- return make_pass(KEY, bash_version=bash_ver)\n+ bash = rpms.get_max('bash')\n+ return make_info(KEY, bash_version=bash.nvra)\n", "issue": "bash_version example doesn't work with json format\nRunning `insights run -p examples/rules -f json` results in a traceback because the `bash_version` rule puts an `InstalledRpm` object into its response:\r\n\r\n```\r\nTypeError: Object of type 'InstalledRpm' is not JSON serializable\r\n```\n", "before_files": [{"content": "\"\"\"\nBash Version\n============\n\nThis is a simple rule and can be run against the local host\nusing the following command::\n\n$ insights-run -p examples.rules.bash_version\n\nor from the examples/rules directory::\n\n$ python sample_rules.py\n\"\"\"\nfrom insights.core.plugins import make_pass, rule\nfrom insights.parsers.installed_rpms import InstalledRpms\n\nKEY = \"BASH_VERSION\"\n\nCONTENT = \"Bash RPM Version: {{ bash_version }}\"\n\n\n@rule(InstalledRpms)\ndef report(rpms):\n bash_ver = rpms.get_max('bash')\n return make_pass(KEY, bash_version=bash_ver)\n", "path": "examples/rules/bash_version.py"}]} | 778 | 168 |
gh_patches_debug_19148 | rasdani/github-patches | git_diff | coala__coala-bears-1422 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Most YAML documents use document starts (---)
Hi,
I am the creator of yamllint, the linter coala uses for YAML.
Since #965 was merged three months ago, coala fails on many projects like Ansible, OpenStack and even yamllint itself, because coala doesn't accept document start markers (`---`) anymore.
Document start markers are commonly used, and required when declaring multiple documents in a single `.yaml` file (see [the spec](http://yaml.org/spec/1.2/spec.html#id2800132)).
The proposed fix in the original issue (#923) was to disable the rule, but the implemented fix (#965) made document starts forbidden.
My opinion is that coala should either require document starts, or disable the rule by default.
</issue>
<code>
[start of bears/yaml/YAMLLintBear.py]
1 from coalib.bearlib.abstractions.Linter import linter
2 from dependency_management.requirements.PipRequirement import PipRequirement
3 import yaml
4
5
6 @linter(executable='yamllint',
7 output_format='regex',
8 output_regex=r'.+:(?P<line>\d+):(?P<column>\d+): '
9 r'\[(?P<severity>error|warning)\] (?P<message>.+)')
10 class YAMLLintBear:
11 """
12 Check yaml code for errors and possible problems.
13
14 You can read more about capabilities at
15 <http://yamllint.readthedocs.org/en/latest/rules.html>.
16 """
17
18 LANGUAGES = {'YAML'}
19 REQUIREMENTS = {PipRequirement('yamllint', '1.5')}
20 AUTHORS = {'The coala developers'}
21 AUTHORS_EMAILS = {'[email protected]'}
22 LICENSE = 'AGPL-3.0'
23 CAN_DETECT = {'Syntax', 'Formatting'}
24
25 @staticmethod
26 def generate_config(filename, file,
27 document_start: bool=False):
28 """
29 :param document_start:
30 Use this rule to require or forbid the use of document start
31 marker (---).
32 """
33 yamllint_configs = {
34 'extends': 'default',
35 'rules': {
36 'document-start': {
37 'present': False
38 }
39 }
40 }
41 if document_start:
42 yamllint_configs['rules']['document-start']['present'] = True
43
44 return yaml.dump(yamllint_configs)
45
46 @staticmethod
47 def create_arguments(filename, file, config_file, yamllint_config: str=''):
48 """
49 :param yamllint_config: Path to a custom configuration file.
50 """
51 args = ('-f', 'parsable', filename)
52 if yamllint_config:
53 args += ('--config-file=' + yamllint_config,)
54 else:
55 args += ('--config-file=' + config_file,)
56 return args
57
[end of bears/yaml/YAMLLintBear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bears/yaml/YAMLLintBear.py b/bears/yaml/YAMLLintBear.py
--- a/bears/yaml/YAMLLintBear.py
+++ b/bears/yaml/YAMLLintBear.py
@@ -24,7 +24,7 @@
@staticmethod
def generate_config(filename, file,
- document_start: bool=False):
+ document_start: bool=None):
"""
:param document_start:
Use this rule to require or forbid the use of document start
@@ -33,13 +33,10 @@
yamllint_configs = {
'extends': 'default',
'rules': {
- 'document-start': {
- 'present': False
- }
- }
+ 'document-start': 'disable' if document_start is None
+ else {'present': document_start},
+ },
}
- if document_start:
- yamllint_configs['rules']['document-start']['present'] = True
return yaml.dump(yamllint_configs)
| {"golden_diff": "diff --git a/bears/yaml/YAMLLintBear.py b/bears/yaml/YAMLLintBear.py\n--- a/bears/yaml/YAMLLintBear.py\n+++ b/bears/yaml/YAMLLintBear.py\n@@ -24,7 +24,7 @@\n \n @staticmethod\n def generate_config(filename, file,\n- document_start: bool=False):\n+ document_start: bool=None):\n \"\"\"\n :param document_start:\n Use this rule to require or forbid the use of document start\n@@ -33,13 +33,10 @@\n yamllint_configs = {\n 'extends': 'default',\n 'rules': {\n- 'document-start': {\n- 'present': False\n- }\n- }\n+ 'document-start': 'disable' if document_start is None\n+ else {'present': document_start},\n+ },\n }\n- if document_start:\n- yamllint_configs['rules']['document-start']['present'] = True\n \n return yaml.dump(yamllint_configs)\n", "issue": "Most YAML documents use document starts (---)\nHi,\r\n\r\nI am the creator of yamllint, the linter coala uses for YAML.\r\n\r\nSince #965 was merged three months ago, coala fails on many projects like Ansible, OpenStack and even yamllint itself, because coala doesn't accept document start markers (`---`) anymore.\r\n\r\nDocument start markers are commonly used, and required when declaring multiple documents in a single `.yaml` file (see [the spec](http://yaml.org/spec/1.2/spec.html#id2800132)).\r\n\r\nThe proposed fix in the original issue (#923) was to disable the rule, but the implemented fix (#965) made document starts forbidden.\r\n\r\nMy opinion is that coala should either require document starts, or disable the rule by default.\n", "before_files": [{"content": "from coalib.bearlib.abstractions.Linter import linter\nfrom dependency_management.requirements.PipRequirement import PipRequirement\nimport yaml\n\n\n@linter(executable='yamllint',\n output_format='regex',\n output_regex=r'.+:(?P<line>\\d+):(?P<column>\\d+): '\n r'\\[(?P<severity>error|warning)\\] (?P<message>.+)')\nclass YAMLLintBear:\n \"\"\"\n Check yaml code for errors and possible problems.\n\n You can read more about capabilities at\n <http://yamllint.readthedocs.org/en/latest/rules.html>.\n \"\"\"\n\n LANGUAGES = {'YAML'}\n REQUIREMENTS = {PipRequirement('yamllint', '1.5')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_DETECT = {'Syntax', 'Formatting'}\n\n @staticmethod\n def generate_config(filename, file,\n document_start: bool=False):\n \"\"\"\n :param document_start:\n Use this rule to require or forbid the use of document start\n marker (---).\n \"\"\"\n yamllint_configs = {\n 'extends': 'default',\n 'rules': {\n 'document-start': {\n 'present': False\n }\n }\n }\n if document_start:\n yamllint_configs['rules']['document-start']['present'] = True\n\n return yaml.dump(yamllint_configs)\n\n @staticmethod\n def create_arguments(filename, file, config_file, yamllint_config: str=''):\n \"\"\"\n :param yamllint_config: Path to a custom configuration file.\n \"\"\"\n args = ('-f', 'parsable', filename)\n if yamllint_config:\n args += ('--config-file=' + yamllint_config,)\n else:\n args += ('--config-file=' + config_file,)\n return args\n", "path": "bears/yaml/YAMLLintBear.py"}]} | 1,256 | 233 |
gh_patches_debug_22852 | rasdani/github-patches | git_diff | python__mypy-3330 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mypy_extensions is listed as owned by David Foster
See https://github.com/python/mypy/blob/master/extensions/setup.py#L37
David Foster did indeed create the first version but I presume he doesn't want to be bothered about the subsequent additions?
We should probably change this to "The mypy developers" -- but where to point the email? Maybe it can be omitted. The url might also better point to GitHub.
Attn: @davidfstr
</issue>
<code>
[start of extensions/setup.py]
1 #!/usr/bin/env python
2
3 # NOTE: This package must support Python 2.7 in addition to Python 3.x
4
5 from distutils.core import setup
6
7 version = '0.2.0-dev'
8 description = 'Experimental type system extensions for programs checked with the mypy typechecker.'
9 long_description = '''
10 Mypy Extensions
11 ===============
12
13 The "mypy_extensions" module defines experimental extensions to the
14 standard "typing" module that are supported by the mypy typechecker.
15 '''.lstrip()
16
17 classifiers = [
18 'Development Status :: 2 - Pre-Alpha',
19 'Environment :: Console',
20 'Intended Audience :: Developers',
21 'License :: OSI Approved :: MIT License',
22 'Operating System :: POSIX',
23 'Programming Language :: Python :: 2',
24 'Programming Language :: Python :: 2.7',
25 'Programming Language :: Python :: 3',
26 'Programming Language :: Python :: 3.3',
27 'Programming Language :: Python :: 3.4',
28 'Programming Language :: Python :: 3.5',
29 'Topic :: Software Development',
30 ]
31
32 setup(
33 name='mypy_extensions',
34 version=version,
35 description=description,
36 long_description=long_description,
37 author='David Foster',
38 author_email='[email protected]',
39 url='http://www.mypy-lang.org/',
40 license='MIT License',
41 platforms=['POSIX'],
42 py_modules=['mypy_extensions'],
43 classifiers=classifiers,
44 )
45
[end of extensions/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/extensions/setup.py b/extensions/setup.py
--- a/extensions/setup.py
+++ b/extensions/setup.py
@@ -4,7 +4,7 @@
from distutils.core import setup
-version = '0.2.0-dev'
+version = '0.2.0'
description = 'Experimental type system extensions for programs checked with the mypy typechecker.'
long_description = '''
Mypy Extensions
@@ -26,6 +26,7 @@
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
]
@@ -34,8 +35,8 @@
version=version,
description=description,
long_description=long_description,
- author='David Foster',
- author_email='[email protected]',
+ author='The mypy developers',
+ author_email='[email protected]',
url='http://www.mypy-lang.org/',
license='MIT License',
platforms=['POSIX'],
| {"golden_diff": "diff --git a/extensions/setup.py b/extensions/setup.py\n--- a/extensions/setup.py\n+++ b/extensions/setup.py\n@@ -4,7 +4,7 @@\n \n from distutils.core import setup\n \n-version = '0.2.0-dev'\n+version = '0.2.0'\n description = 'Experimental type system extensions for programs checked with the mypy typechecker.'\n long_description = '''\n Mypy Extensions\n@@ -26,6 +26,7 @@\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development',\n ]\n \n@@ -34,8 +35,8 @@\n version=version,\n description=description,\n long_description=long_description,\n- author='David Foster',\n- author_email='[email protected]',\n+ author='The mypy developers',\n+ author_email='[email protected]',\n url='http://www.mypy-lang.org/',\n license='MIT License',\n platforms=['POSIX'],\n", "issue": "mypy_extensions is listed as owned by David Foster\nSee https://github.com/python/mypy/blob/master/extensions/setup.py#L37\r\n\r\nDavid Foster did indeed create the first version but I presume he doesn't want to be bothered about the subsequent additions?\r\n\r\nWe should probably change this to \"The mypy developers\" -- but where to point the email? Maybe it can be omitted. The url might also better point to GitHub.\r\n\r\nAttn: @davidfstr \n", "before_files": [{"content": "#!/usr/bin/env python\n\n# NOTE: This package must support Python 2.7 in addition to Python 3.x\n\nfrom distutils.core import setup\n\nversion = '0.2.0-dev'\ndescription = 'Experimental type system extensions for programs checked with the mypy typechecker.'\nlong_description = '''\nMypy Extensions\n===============\n\nThe \"mypy_extensions\" module defines experimental extensions to the\nstandard \"typing\" module that are supported by the mypy typechecker.\n'''.lstrip()\n\nclassifiers = [\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development',\n]\n\nsetup(\n name='mypy_extensions',\n version=version,\n description=description,\n long_description=long_description,\n author='David Foster',\n author_email='[email protected]',\n url='http://www.mypy-lang.org/',\n license='MIT License',\n platforms=['POSIX'],\n py_modules=['mypy_extensions'],\n classifiers=classifiers,\n)\n", "path": "extensions/setup.py"}]} | 1,023 | 254 |
gh_patches_debug_4534 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4237 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive for check CKV_AZURE_5: "Ensure RBAC is enabled on AKS clusters"
**Describe the issue**
The check CKV_AZURE_5 for the Terraform resource `azurerm_kubernetes_cluster` ensures that RBAC is enabled in the Kubernetes cluster.
Depending on how the `role_based_access_control_enabled` property is set, the check result is correct or not:
- `role_based_access_control_enabled = true`: the check passes. It's ok.
- `role_based_access_control_enabled = false`: the check fails. It's ok.
- `role_based_access_control_enabled` not defined: the check fails. It's NOT ok, as the default value of this property is `true` (see https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#role_based_access_control_enabled)
**Examples**
This example will fail, but it shouldn't:
```
resource "azurerm_resource_group" "foo" {
name = "foo"
location = "West Europe"
}
resource "azurerm_kubernetes_cluster" "foo" {
name = "foo"
resource_group_name = azurerm_resource_group.foo.name
location = azurerm_resource_group.foo.location
dns_prefix = "foo"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
}
identity {
type = "SystemAssigned"
}
# role_based_access_control_enabled = true
}
```
**Version (please complete the following information):**
- Checkov Version : `2.2.252` (latest docker image)
**Additional context**
The problem is in this source file : https://github.com/bridgecrewio/checkov/blob/48abe40926c97bd2e6f8c80491369be462ce3edd/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py#L19-L29
It returns a failed result if the property is not found in the resource. That shouldn't be the case, as the default value of the property is `true`.
</issue>
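A minimal sketch of the tri-state logic the report asks for, using the same `dpath` lookups as the hook; the wrapper function and its string results are illustrative, not checkov's actual API:

```python
import dpath.util

def rbac_check_result(conf):
    # An explicit value decides the result; an omitted property falls
    # back to the provider default, which is true for this resource.
    for key in ("role_based_access_control/[0]/enabled",  # azurerm < 2.99.0
                "role_based_access_control_enabled"):  # azurerm >= 2.99.0
        if dpath.util.search(conf, key):
            return "PASSED" if dpath.util.get(conf, key)[0] else "FAILED"
    return "PASSED"  # property unset: rely on the Terraform default (true)
```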
<code>
[start of checkov/terraform/checks/resource/azure/AKSRbacEnabled.py]
1 import dpath.util
2 from checkov.common.models.enums import CheckCategories, CheckResult
3 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
4
5
6 class AKSRbacEnabled(BaseResourceCheck):
7 def __init__(self):
8 name = "Ensure RBAC is enabled on AKS clusters"
9 id = "CKV_AZURE_5"
10 supported_resources = ["azurerm_kubernetes_cluster"]
11 categories = [CheckCategories.KUBERNETES]
12 super().__init__(
13 name=name,
14 id=id,
15 categories=categories,
16 supported_resources=supported_resources,
17 )
18
19 def scan_resource_conf(self, conf):
20 self.evaluated_keys = [
21 "role_based_access_control/[0]/enabled", # azurerm < 2.99.0
22 "role_based_access_control_enabled", # azurerm >= 2.99.0
23 ]
24
25 for key in self.evaluated_keys:
26 if dpath.search(conf, key) and dpath.get(conf, key)[0]:
27 return CheckResult.PASSED
28
29 return CheckResult.FAILED
30
31
32 check = AKSRbacEnabled()
33
[end of checkov/terraform/checks/resource/azure/AKSRbacEnabled.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
--- a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
+++ b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py
@@ -23,10 +23,10 @@
]
for key in self.evaluated_keys:
- if dpath.search(conf, key) and dpath.get(conf, key)[0]:
- return CheckResult.PASSED
+ if dpath.search(conf, key):
+ return CheckResult.PASSED if dpath.get(conf, key)[0] else CheckResult.FAILED
- return CheckResult.FAILED
+ return CheckResult.PASSED
check = AKSRbacEnabled()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n--- a/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n+++ b/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py\n@@ -23,10 +23,10 @@\n ]\n \n for key in self.evaluated_keys:\n- if dpath.search(conf, key) and dpath.get(conf, key)[0]:\n- return CheckResult.PASSED\n+ if dpath.search(conf, key):\n+ return CheckResult.PASSED if dpath.get(conf, key)[0] else CheckResult.FAILED\n \n- return CheckResult.FAILED\n+ return CheckResult.PASSED\n \n \n check = AKSRbacEnabled()\n", "issue": "False positive for check CKV_AZURE_5: \"Ensure RBAC is enabled on AKS clusters\"\n**Describe the issue**\r\nThe check CKV_AZURE_5 for terraform resource `azurerm_kubernetes_cluster` ensures that RBAC is enabled in the kubernetes cluster.\r\nDepending on how the `role_based_access_control_enabled` property is set, the check result is exact or not :\r\n- `role_based_access_control_enabled = true`: the check passes. It's ok.\r\n- `role_based_access_control_enabled = false`: the check fails. It's ok.\r\n- `role_based_access_control_enabled` not defined : check fails. It's NOT ok as default value of this property is `true` (see https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#role_based_access_control_enabled)\r\n\r\n**Examples**\r\nThis example will fails but it shouldn't:\r\n```\r\nresource \"azurerm_resource_group\" \"foo\" {\r\n name = \"foo\"\r\n location = \"West Europe\"\r\n}\r\n\r\nresource \"azurerm_kubernetes_cluster\" \"foo\" {\r\n name = \"foo\"\r\n resource_group_name = azurerm_resource_group.foo.name\r\n location = azurerm_resource_group.foo.location\r\n dns_prefix = \"foo\"\r\n\r\n default_node_pool {\r\n name = \"default\"\r\n node_count = 1\r\n vm_size = \"Standard_D2_v2\"\r\n }\r\n\r\n identity {\r\n type = \"SystemAssigned\"\r\n }\r\n\r\n # role_based_access_control_enabled = true\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version : `2.2.252` (latest docker image)\r\n\r\n**Additional context**\r\nThe problem is in this source file : https://github.com/bridgecrewio/checkov/blob/48abe40926c97bd2e6f8c80491369be462ce3edd/checkov/terraform/checks/resource/azure/AKSRbacEnabled.py#L19-L29\r\n\r\nIt returns `false` if the property is not found in the resource. It shouldn't be the case as the default value of the property is `true`\r\n\n", "before_files": [{"content": "import dpath.util\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass AKSRbacEnabled(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure RBAC is enabled on AKS clusters\"\n id = \"CKV_AZURE_5\"\n supported_resources = [\"azurerm_kubernetes_cluster\"]\n categories = [CheckCategories.KUBERNETES]\n super().__init__(\n name=name,\n id=id,\n categories=categories,\n supported_resources=supported_resources,\n )\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = [\n \"role_based_access_control/[0]/enabled\", # azurerm < 2.99.0\n \"role_based_access_control_enabled\", # azurerm >= 2.99.0\n ]\n\n for key in self.evaluated_keys:\n if dpath.search(conf, key) and dpath.get(conf, key)[0]:\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = AKSRbacEnabled()\n", "path": "checkov/terraform/checks/resource/azure/AKSRbacEnabled.py"}]} | 1,334 | 189 |
gh_patches_debug_24671 | rasdani/github-patches | git_diff | docker__docker-py-45 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failure to import requests.packages.urllib3.connectionpool
With requests 1.2.3 (the version which gets installed), this happens.
Workaround: use requests 1.2.0.
``` ipython
$ pip install --user docker-py
Downloading/unpacking docker-py
Downloading docker-py-0.1.5.tar.gz
Running setup.py egg_info for package docker-py
Requirement already satisfied (use --upgrade to upgrade): requests in /usr/lib/python2.7/dist-packages (from docker-py)
Requirement already satisfied (use --upgrade to upgrade): six in /usr/lib/python2.7/dist-packages (from docker-py)
Installing collected packages: docker-py
Running setup.py install for docker-py
Successfully installed docker-py
Cleaning up...
pwaller@fractal:~$ ipython
imporPython 2.7.5+ (default, Jun 5 2013, 10:40:07)
Type "copyright", "credits" or "license" for more information.
IPython 1.1.0 -- An enhanced Interactive Python.
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
In [1]: import docker
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-1-3ac1c348f58a> in <module>()
----> 1 import docker
/home/pwaller/.local/lib/python2.7/site-packages/docker/__init__.py in <module>()
----> 1 from .client import Client
/home/pwaller/.local/lib/python2.7/site-packages/docker/client.py in <module>()
15 from requests.exceptions import HTTPError
16 from requests.adapters import HTTPAdapter
---> 17 from requests.packages.urllib3.connectionpool import HTTPConnectionPool
18
19 if six.PY3:
ImportError: No module named packages.urllib3.connectionpool
In [2]:
```
</issue>
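The usual way to stay compatible across requests builds that do and do not vendor urllib3 is an import fallback; a minimal sketch:

```python
try:
    # some requests builds vendor urllib3 under requests.packages
    import requests.packages.urllib3.connectionpool as connectionpool
except ImportError:
    # others (e.g. distro-packaged requests) unbundle it, so fall back
    # to the standalone urllib3 package
    import urllib3.connectionpool as connectionpool

HTTPConnectionPool = connectionpool.HTTPConnectionPool
```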
<code>
[start of docker/unixconn.py]
1 # Copyright 2013 dotCloud inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import httplib
16 import requests.adapters
17 import requests.packages.urllib3.connectionpool
18 import socket
19
20 HTTPConnectionPool = requests.packages.urllib3.connectionpool.HTTPConnectionPool
21
22
23 class UnixHTTPConnection(httplib.HTTPConnection, object):
24 def __init__(self, base_url, unix_socket):
25 httplib.HTTPConnection.__init__(self, 'localhost')
26 self.base_url = base_url
27 self.unix_socket = unix_socket
28
29 def connect(self):
30 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
31 sock.connect(self.base_url.replace("unix:/",""))
32 self.sock = sock
33
34 def _extract_path(self, url):
35 #remove the base_url entirely..
36 return url.replace(self.base_url, "")
37
38 def request(self, method, url, **kwargs):
39 url = self._extract_path(self.unix_socket)
40 super(UnixHTTPConnection, self).request(method, url, **kwargs)
41
42
43 class UnixHTTPConnectionPool(HTTPConnectionPool):
44 def __init__(self, base_url, socket_path):
45 self.socket_path = socket_path
46 self.base_url = base_url
47 super(UnixHTTPConnectionPool, self).__init__(self, 'localhost')
48
49 def _new_conn(self):
50 return UnixHTTPConnection(self.base_url, self.socket_path)
51
52
53 class UnixAdapter(requests.adapters.HTTPAdapter):
54 def __init__(self, base_url):
55 self.base_url = base_url
56 super(UnixAdapter, self).__init__()
57
58 def get_connection(self, socket_path, proxies=None):
59 return UnixHTTPConnectionPool(self.base_url, socket_path)
60
[end of docker/unixconn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/unixconn.py b/docker/unixconn.py
--- a/docker/unixconn.py
+++ b/docker/unixconn.py
@@ -14,10 +14,12 @@
import httplib
import requests.adapters
-import requests.packages.urllib3.connectionpool
import socket
-HTTPConnectionPool = requests.packages.urllib3.connectionpool.HTTPConnectionPool
+try:
+ import requests.packages.urllib3.connectionpool as connectionpool
+except ImportError:
+ import urllib3.connectionpool as connectionpool
class UnixHTTPConnection(httplib.HTTPConnection, object):
@@ -28,7 +30,7 @@
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.connect(self.base_url.replace("unix:/",""))
+ sock.connect(self.base_url.replace("unix:/", ""))
self.sock = sock
def _extract_path(self, url):
@@ -40,7 +42,7 @@
super(UnixHTTPConnection, self).request(method, url, **kwargs)
-class UnixHTTPConnectionPool(HTTPConnectionPool):
+class UnixHTTPConnectionPool(connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path):
self.socket_path = socket_path
self.base_url = base_url
| {"golden_diff": "diff --git a/docker/unixconn.py b/docker/unixconn.py\n--- a/docker/unixconn.py\n+++ b/docker/unixconn.py\n@@ -14,10 +14,12 @@\n \n import httplib\n import requests.adapters\n-import requests.packages.urllib3.connectionpool\n import socket\n \n-HTTPConnectionPool = requests.packages.urllib3.connectionpool.HTTPConnectionPool\n+try:\n+ import requests.packages.urllib3.connectionpool as connectionpool\n+except ImportError:\n+ import urllib3.connectionpool as connectionpool\n \n \n class UnixHTTPConnection(httplib.HTTPConnection, object):\n@@ -28,7 +30,7 @@\n \n def connect(self):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n- sock.connect(self.base_url.replace(\"unix:/\",\"\"))\n+ sock.connect(self.base_url.replace(\"unix:/\", \"\"))\n self.sock = sock\n \n def _extract_path(self, url):\n@@ -40,7 +42,7 @@\n super(UnixHTTPConnection, self).request(method, url, **kwargs)\n \n \n-class UnixHTTPConnectionPool(HTTPConnectionPool):\n+class UnixHTTPConnectionPool(connectionpool.HTTPConnectionPool):\n def __init__(self, base_url, socket_path):\n self.socket_path = socket_path\n self.base_url = base_url\n", "issue": "Failure to import requests.packages.urllib3.connectionpool\nWith requests 1.2.3 (the version which gets installed), this happens.\n\nWorkaround: use requests 1.2.0.\n\n``` ipython\n$ pip install --user docker-py\nDownloading/unpacking docker-py\n Downloading docker-py-0.1.5.tar.gz\n Running setup.py egg_info for package docker-py\n\nRequirement already satisfied (use --upgrade to upgrade): requests in /usr/lib/python2.7/dist-packages (from docker-py)\nRequirement already satisfied (use --upgrade to upgrade): six in /usr/lib/python2.7/dist-packages (from docker-py)\nInstalling collected packages: docker-py\n Running setup.py install for docker-py\n\nSuccessfully installed docker-py\nCleaning up...\npwaller@fractal:~$ ipython\nimporPython 2.7.5+ (default, Jun 5 2013, 10:40:07) \nType \"copyright\", \"credits\" or \"license\" for more information.\n\nIPython 1.1.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython's features.\n%quickref -> Quick reference.\nhelp -> Python's own help system.\nobject? -> Details about 'object', use 'object??' 
for extra details.\n\nIn [1]: import docker\n---------------------------------------------------------------------------\nImportError Traceback (most recent call last)\n<ipython-input-1-3ac1c348f58a> in <module>()\n----> 1 import docker\n\n/home/pwaller/.local/lib/python2.7/site-packages/docker/__init__.py in <module>()\n----> 1 from .client import Client\n\n/home/pwaller/.local/lib/python2.7/site-packages/docker/client.py in <module>()\n 15 from requests.exceptions import HTTPError\n 16 from requests.adapters import HTTPAdapter\n---> 17 from requests.packages.urllib3.connectionpool import HTTPConnectionPool\n 18 \n 19 if six.PY3:\n\nImportError: No module named packages.urllib3.connectionpool\n\nIn [2]: \n```\n\n", "before_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport httplib\nimport requests.adapters\nimport requests.packages.urllib3.connectionpool\nimport socket\n\nHTTPConnectionPool = requests.packages.urllib3.connectionpool.HTTPConnectionPool\n\n\nclass UnixHTTPConnection(httplib.HTTPConnection, object):\n def __init__(self, base_url, unix_socket):\n httplib.HTTPConnection.__init__(self, 'localhost')\n self.base_url = base_url\n self.unix_socket = unix_socket\n\n def connect(self):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(self.base_url.replace(\"unix:/\",\"\"))\n self.sock = sock\n\n def _extract_path(self, url):\n #remove the base_url entirely..\n return url.replace(self.base_url, \"\")\n\n def request(self, method, url, **kwargs):\n url = self._extract_path(self.unix_socket)\n super(UnixHTTPConnection, self).request(method, url, **kwargs)\n\n\nclass UnixHTTPConnectionPool(HTTPConnectionPool):\n def __init__(self, base_url, socket_path):\n self.socket_path = socket_path\n self.base_url = base_url\n super(UnixHTTPConnectionPool, self).__init__(self, 'localhost')\n\n def _new_conn(self):\n return UnixHTTPConnection(self.base_url, self.socket_path)\n\n\nclass UnixAdapter(requests.adapters.HTTPAdapter):\n def __init__(self, base_url):\n self.base_url = base_url\n super(UnixAdapter, self).__init__()\n\n def get_connection(self, socket_path, proxies=None):\n return UnixHTTPConnectionPool(self.base_url, socket_path)\n", "path": "docker/unixconn.py"}]} | 1,598 | 283 |
gh_patches_debug_4916 | rasdani/github-patches | git_diff | e-valuation__EvaP-566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
colorize average grades on course detail pages
The numbers in the lower right should be styled with CSS like the ones in the upper left.

</issue>
<code>
[start of evap/evaluation/templatetags/evaluation_templatetags.py]
1 from django.template import Library
2
3 register = Library()
4
5
6 @register.inclusion_tag("user_list_with_links.html")
7 def include_user_list_with_links(users):
8 return dict(users=users)
9
10
11 @register.inclusion_tag("sortable_form_js.html")
12 def include_sortable_form_js():
13 return dict()
14
15 @register.inclusion_tag("progress_bar.html")
16 def include_progress_bar(done, total, large=False):
17 return dict(done=done, total=total, large=large)
18
[end of evap/evaluation/templatetags/evaluation_templatetags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/templatetags/evaluation_templatetags.py b/evap/evaluation/templatetags/evaluation_templatetags.py
--- a/evap/evaluation/templatetags/evaluation_templatetags.py
+++ b/evap/evaluation/templatetags/evaluation_templatetags.py
@@ -15,3 +15,7 @@
@register.inclusion_tag("progress_bar.html")
def include_progress_bar(done, total, large=False):
return dict(done=done, total=total, large=large)
+
[email protected]_tag("result_bar.html")
+def include_result_bar(result, show_grades, questionnaire_warning=False):
+ return dict(result=result, show_grades=show_grades, questionnaire_warning=questionnaire_warning)
| {"golden_diff": "diff --git a/evap/evaluation/templatetags/evaluation_templatetags.py b/evap/evaluation/templatetags/evaluation_templatetags.py\n--- a/evap/evaluation/templatetags/evaluation_templatetags.py\n+++ b/evap/evaluation/templatetags/evaluation_templatetags.py\n@@ -15,3 +15,7 @@\n @register.inclusion_tag(\"progress_bar.html\")\n def include_progress_bar(done, total, large=False):\n return dict(done=done, total=total, large=large)\n+\[email protected]_tag(\"result_bar.html\")\n+def include_result_bar(result, show_grades, questionnaire_warning=False):\n+ return dict(result=result, show_grades=show_grades, questionnaire_warning=questionnaire_warning)\n", "issue": "colorize average grades on course detail pages\nthe numbers in the lower right should be css'd like the ones in the upper left.\n\n\n\n", "before_files": [{"content": "from django.template import Library\n\nregister = Library()\n\n\[email protected]_tag(\"user_list_with_links.html\")\ndef include_user_list_with_links(users):\n return dict(users=users)\n\n\[email protected]_tag(\"sortable_form_js.html\")\ndef include_sortable_form_js():\n return dict()\n\[email protected]_tag(\"progress_bar.html\")\ndef include_progress_bar(done, total, large=False):\n return dict(done=done, total=total, large=large)\n", "path": "evap/evaluation/templatetags/evaluation_templatetags.py"}]} | 800 | 182 |
gh_patches_debug_17091 | rasdani/github-patches | git_diff | Kinto__kinto-493 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add creation date and current kinto version to generated config file
Using comments for example:
``` ini
# Created at Thu, 03 Mar 2016 17:02:37 +0100
# Using Kinto version 1.11.2
[server:main]
use = egg:waitress#main
host = 0.0.0.0
port = 8888
```
</issue>
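A minimal sketch of what the issue asks for, to be called from the `init()` shown below; the helper name and timestamp format are assumptions, and it presumes the package exposes `__version__`:

```python
from time import strftime

from kinto import __version__

def add_config_metadata(values):
    # rendered into the template as the "# Created at ..." and
    # "# Using Kinto version ..." comment lines
    values['kinto_version'] = __version__
    values['config_file_timestamp'] = strftime('%a, %d %b %Y %H:%M:%S %z')
```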
<code>
[start of kinto/config/__init__.py]
1 import os
2 import codecs
3
4 from cliquet import utils as cliquet_utils
5
6 from kinto import logger
7
8 HERE = os.path.abspath(os.path.dirname(__file__))
9
10
11 def render_template(template, destination, **kwargs):
12 template = os.path.join(HERE, template)
13 folder = os.path.dirname(destination)
14
15 if folder and not os.path.exists(folder):
16 os.makedirs(folder)
17
18 logger.info("Created config {}".format(os.path.abspath(destination)))
19
20 with codecs.open(template, 'r', encoding='utf-8') as f:
21 raw_template = f.read()
22 rendered = raw_template.format(**kwargs)
23 with codecs.open(destination, 'w+', encoding='utf-8') as output:
24 output.write(rendered)
25
26
27 def init(config_file, backend):
28 values = {}
29
30 values['secret'] = cliquet_utils.random_bytes_hex(32)
31
32 values['storage_backend'] = "cliquet.storage.%s" % backend
33 values['cache_backend'] = "cliquet.cache.%s" % backend
34 values['permission_backend'] = "cliquet.permission.%s" % backend
35
36 if backend == 'postgresql':
37 postgresql_url = "postgres://postgres:postgres@localhost/postgres"
38 values['storage_url'] = postgresql_url
39 values['cache_url'] = postgresql_url
40 values['permission_url'] = postgresql_url
41
42 elif backend == 'redis':
43 redis_url = "redis://localhost:6379"
44 values['storage_url'] = redis_url + "/1"
45 values['cache_url'] = redis_url + "/2"
46 values['permission_url'] = redis_url + "/3"
47
48 else:
49 values['storage_url'] = ''
50 values['cache_url'] = ''
51 values['permission_url'] = ''
52
53 render_template("kinto.tpl", config_file, **values)
54
[end of kinto/config/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py
--- a/kinto/config/__init__.py
+++ b/kinto/config/__init__.py
@@ -1,9 +1,11 @@
import os
import codecs
+from time import strftime
from cliquet import utils as cliquet_utils
from kinto import logger
+from kinto import __version__
HERE = os.path.abspath(os.path.dirname(__file__))
@@ -29,6 +31,9 @@
values['secret'] = cliquet_utils.random_bytes_hex(32)
+ values['kinto_version'] = __version__
+ values['config_file_timestamp'] = strftime('%a, %d %b %Y %H:%M:%S %z')
+
values['storage_backend'] = "cliquet.storage.%s" % backend
values['cache_backend'] = "cliquet.cache.%s" % backend
values['permission_backend'] = "cliquet.permission.%s" % backend
| {"golden_diff": "diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -1,9 +1,11 @@\n import os\n import codecs\n+from time import strftime\n \n from cliquet import utils as cliquet_utils\n \n from kinto import logger\n+from kinto import __version__\n \n HERE = os.path.abspath(os.path.dirname(__file__))\n \n@@ -29,6 +31,9 @@\n \n values['secret'] = cliquet_utils.random_bytes_hex(32)\n \n+ values['kinto_version'] = __version__\n+ values['config_file_timestamp'] = strftime('%a, %d %b %Y %H:%M:%S %z')\n+\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n", "issue": "Add creation date and current kinto version to generated config file\nUsing comments for example:\n\n``` ini\n# Created at Thu, 03 Mar 2016 17:02:37 +0100\n# Using Kinto version 1.11.2\n\n[server:main]\nuse = egg:waitress#main\nhost = 0.0.0.0\nport = 8888\n\n```\n\n", "before_files": [{"content": "import os\nimport codecs\n\nfrom cliquet import utils as cliquet_utils\n\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n\n values['secret'] = cliquet_utils.random_bytes_hex(32)\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}]} | 1,139 | 226 |
gh_patches_debug_7807 | rasdani/github-patches | git_diff | locustio__locust-2609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Report][Modern-UI] HTML report is blank
### Prerequisites
- [X] I am using [the latest version of Locust](https://github.com/locustio/locust/releases/)
- [X] I am reporting a bug, not asking a question
### Description
Run a test, then open the HTML report -> notice that it is blank.
Note: this bug occurs from 2.22.0 onward; it did not occur on 2.21.0.

### Command line
locust -f SimpleWeb.py -u 100 -r 10 -t 30s --html=samplelocust.html
### Locustfile contents
```python3
from locust import FastHttpUser, HttpUser, between, constant_pacing, events, task
from loguru import logger
class QuickstartUser(FastHttpUser):
wait_time = between(2, 5)
host = "http://127.0.0.1:5000"
# begin = time.time()
@task()
def get_tasks_1(self):
res = None
try:
payload = {}
headers = {"Cache-Control": "max-age=0, no-cache, no-store, must-revalidate"}
res = self.client.get("/api/tasks", headers=headers, data=payload, name="Get Tasks")
except Exception as exception:
logger.error(exception)
@task()
def post_lewin(self):
try:
payload = {}
headers = {"Cache-Control": "max-age=0, no-cache, no-store, must-revalidate"}
self.client.post("/api/lewin", headers=headers, data=payload, name="Post Lewin")
except Exception as exception:
logger.error(exception)
```
### Python version
3.9.18
### Locust version
2.23.1
### Operating system
macOS 14.2.1 (23C71)
</issue>
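One thing worth ruling out when a report renders empty is a user class that defines no tasks at all; a runnable user needs at least one `@task`. A minimal smoke-test user (host and endpoint are placeholders):

```python
from locust import HttpUser, task

class SmokeUser(HttpUser):
    host = "http://127.0.0.1:5000"  # placeholder target

    @task
    def index(self):
        self.client.get("/")
```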
<code>
[start of examples/web_ui_auth.py]
1 """
2 Example of implementing authentication for Locust when the --web-login flag is given
3
4 This is only to serve as a starting point, proper authentication should be implemented
5 according to your projects specifications.
6
7 For more information, see https://docs.locust.io/en/stable/extending-locust.html#authentication
8 """
9 from locust import HttpUser, events
10
11 import json
12 import os
13
14 from flask import Blueprint, make_response, redirect, request, session, url_for
15 from flask_login import UserMixin, login_user
16
17
18 class LocustHttpUser(HttpUser):
19 pass
20
21
22 class AuthUser(UserMixin):
23 def __init__(self, username):
24 self.username = username
25
26 def get_id(self):
27 return self.username
28
29
30 auth_blueprint = Blueprint("auth", "web_ui_auth")
31
32
33 def load_user(user_id):
34 return AuthUser(session.get("username"))
35
36
37 @events.init.add_listener
38 def locust_init(environment, **kwargs):
39 if environment.web_ui:
40 environment.web_ui.login_manager.user_loader(load_user)
41
42 environment.web_ui.app.config["SECRET_KEY"] = os.getenv("FLASK_SECRET_KEY")
43
44 environment.web_ui.auth_args = {
45 "username_password_callback": "/login_submit",
46 "auth_providers": [
47 {
48 "label": "Github",
49 "callback_url": "/login/github",
50 "icon_url": "https://static-00.iconduck.com/assets.00/github-icon-1024x994-4h5sdmko.png",
51 },
52 ],
53 }
54
55 @auth_blueprint.route("/login/github")
56 def google_login():
57 # Implement authentication with desired auth provider
58 username = "username"
59 session["username"] = username
60 login_user(AuthUser("username"))
61
62 return redirect(url_for("index"))
63
64 @auth_blueprint.route("/login_submit")
65 def login_submit():
66 username = request.args.get("username")
67 password = request.args.get("password")
68
69 # Implement real password verification here
70 if password:
71 session["username"] = username
72 login_user(AuthUser(username))
73
74 return redirect(url_for("index"))
75
76 environment.web_ui.auth_args = {**environment.web_ui.auth_args, "error": "Invalid username or password"}
77
78 return redirect(url_for("login"))
79
80 environment.web_ui.app.register_blueprint(auth_blueprint)
81
[end of examples/web_ui_auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/web_ui_auth.py b/examples/web_ui_auth.py
--- a/examples/web_ui_auth.py
+++ b/examples/web_ui_auth.py
@@ -6,7 +6,7 @@
For more information, see https://docs.locust.io/en/stable/extending-locust.html#authentication
"""
-from locust import HttpUser, events
+from locust import HttpUser, events, task
import json
import os
@@ -16,7 +16,9 @@
class LocustHttpUser(HttpUser):
- pass
+ @task
+ def example(self):
+ self.client.get("/")
class AuthUser(UserMixin):
| {"golden_diff": "diff --git a/examples/web_ui_auth.py b/examples/web_ui_auth.py\n--- a/examples/web_ui_auth.py\n+++ b/examples/web_ui_auth.py\n@@ -6,7 +6,7 @@\n \n For more information, see https://docs.locust.io/en/stable/extending-locust.html#authentication\n \"\"\"\n-from locust import HttpUser, events\n+from locust import HttpUser, events, task\n \n import json\n import os\n@@ -16,7 +16,9 @@\n \n \n class LocustHttpUser(HttpUser):\n- pass\n+ @task\n+ def example(self):\n+ self.client.get(\"/\")\n \n \n class AuthUser(UserMixin):\n", "issue": "[Report][Modern-UI] HTML report is blank\n### Prerequisites\n\n- [X] I am using [the latest version of Locust](https://github.com/locustio/locust/releases/)\n- [X] I am reporting a bug, not asking a question\n\n### Description\n\nRun a test then open the HTML report -> Noticed that it is blank\r\nNote: This bug occurs from 2.22.0, and did not occur on 2.21.0 \r\n\r\n\n\n### Command line\n\nlocust -f SimpleWeb.py -u 100 -r 10 -t 30s --html=samplelocust.html\n\n### Locustfile contents\n\n```python3\nfrom locust import FastHttpUser, HttpUser, between, constant_pacing, events, task\r\nfrom loguru import logger\r\n\r\n\r\nclass QuickstartUser(FastHttpUser):\r\n wait_time = between(2, 5)\r\n\r\n\r\n host = \"http://127.0.0.1:5000\"\r\n # begin = time.time()\r\n\r\n @task()\r\n def get_tasks_1(self):\r\n res = None\r\n try:\r\n payload = {}\r\n headers = {\"Cache-Control\": \"max-age=0, no-cache, no-store, must-revalidate\"}\r\n res = self.client.get(\"/api/tasks\", headers=headers, data=payload, name=\"Get Tasks\")\r\n except Exception as exception:\r\n logger.error(exception)\r\n\r\n @task()\r\n def post_lewin(self):\r\n try:\r\n payload = {}\r\n headers = {\"Cache-Control\": \"max-age=0, no-cache, no-store, must-revalidate\"}\r\n self.client.post(\"/api/lewin\", headers=headers, data=payload, name=\"Post Lewin\")\r\n except Exception as exception:\r\n logger.error(exception)\n```\n\n\n### Python version\n\n3.9.18\n\n### Locust version\n\n2.23.1\n\n### Operating system\n\nmacOS 14.2.1 (23C71)\n", "before_files": [{"content": "\"\"\"\nExample of implementing authentication for Locust when the --web-login flag is given\n\nThis is only to serve as a starting point, proper authentication should be implemented\naccording to your projects specifications.\n\nFor more information, see https://docs.locust.io/en/stable/extending-locust.html#authentication\n\"\"\"\nfrom locust import HttpUser, events\n\nimport json\nimport os\n\nfrom flask import Blueprint, make_response, redirect, request, session, url_for\nfrom flask_login import UserMixin, login_user\n\n\nclass LocustHttpUser(HttpUser):\n pass\n\n\nclass AuthUser(UserMixin):\n def __init__(self, username):\n self.username = username\n\n def get_id(self):\n return self.username\n\n\nauth_blueprint = Blueprint(\"auth\", \"web_ui_auth\")\n\n\ndef load_user(user_id):\n return AuthUser(session.get(\"username\"))\n\n\[email protected]_listener\ndef locust_init(environment, **kwargs):\n if environment.web_ui:\n environment.web_ui.login_manager.user_loader(load_user)\n\n environment.web_ui.app.config[\"SECRET_KEY\"] = os.getenv(\"FLASK_SECRET_KEY\")\n\n environment.web_ui.auth_args = {\n \"username_password_callback\": \"/login_submit\",\n \"auth_providers\": [\n {\n \"label\": \"Github\",\n \"callback_url\": \"/login/github\",\n \"icon_url\": \"https://static-00.iconduck.com/assets.00/github-icon-1024x994-4h5sdmko.png\",\n },\n ],\n }\n\n @auth_blueprint.route(\"/login/github\")\n def google_login():\n # Implement 
authentication with desired auth provider\n username = \"username\"\n session[\"username\"] = username\n login_user(AuthUser(\"username\"))\n\n return redirect(url_for(\"index\"))\n\n @auth_blueprint.route(\"/login_submit\")\n def login_submit():\n username = request.args.get(\"username\")\n password = request.args.get(\"password\")\n\n # Implement real password verification here\n if password:\n session[\"username\"] = username\n login_user(AuthUser(username))\n\n return redirect(url_for(\"index\"))\n\n environment.web_ui.auth_args = {**environment.web_ui.auth_args, \"error\": \"Invalid username or password\"}\n\n return redirect(url_for(\"login\"))\n\n environment.web_ui.app.register_blueprint(auth_blueprint)\n", "path": "examples/web_ui_auth.py"}]} | 1,677 | 144 |
gh_patches_debug_888 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-1268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Pytorch release tracking workflows
## Due Diligence
<!--- Please address the following points before setting your PR "ready for review".
--->
- General:
- [x] **base branch** must be `main` for new features, latest release branch (e.g. `release/1.3.x`) for bug fixes
- [x] **title** of the PR is suitable to appear in the [Release Notes](https://github.com/helmholtz-analytics/heat/releases/latest)
- Implementation:
- [x] unit tests: all split configurations tested
- [x] unit tests: multiple dtypes tested
- [x] documentation updated where needed
## Description
Issue/s resolved: #1241
## Changes proposed:
- upgrade to the latest version of the checkout action
- delete the token parameter such that the default action token is used
## Type of change
## Memory requirements
## Performance
#### Does this change modify the behaviour of other functions? If so, which?
no
</issue>
<code>
[start of heat/core/version.py]
1 """This module contains Heat's version information."""
2
3
4 major: int = 1
5 """Indicates Heat's main version."""
6 minor: int = 3
7 """Indicates feature extension."""
8 micro: int = 0
9 """Indicates revisions for bugfixes."""
10 extension: str = "dev"
11 """Indicates special builds, e.g. for specific hardware."""
12
13 if not extension:
14 __version__: str = f"{major}.{minor}.{micro}"
15 """The combined version string, consisting out of major, minor, micro and possibly extension."""
16 else:
17 __version__: str = f"{major}.{minor}.{micro}-{extension}"
18
[end of heat/core/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/heat/core/version.py b/heat/core/version.py
--- a/heat/core/version.py
+++ b/heat/core/version.py
@@ -3,7 +3,7 @@
major: int = 1
"""Indicates Heat's main version."""
-minor: int = 3
+minor: int = 4
"""Indicates feature extension."""
micro: int = 0
"""Indicates revisions for bugfixes."""
| {"golden_diff": "diff --git a/heat/core/version.py b/heat/core/version.py\n--- a/heat/core/version.py\n+++ b/heat/core/version.py\n@@ -3,7 +3,7 @@\n \n major: int = 1\n \"\"\"Indicates Heat's main version.\"\"\"\n-minor: int = 3\n+minor: int = 4\n \"\"\"Indicates feature extension.\"\"\"\n micro: int = 0\n \"\"\"Indicates revisions for bugfixes.\"\"\"\n", "issue": "Fix Pytorch release tracking workflows\n## Due Diligence\r\n<!--- Please address the following points before setting your PR \"ready for review\".\r\n--->\r\n- General:\r\n - [x] **base branch** must be `main` for new features, latest release branch (e.g. `release/1.3.x`) for bug fixes\r\n - [x] **title** of the PR is suitable to appear in the [Release Notes](https://github.com/helmholtz-analytics/heat/releases/latest)\r\n- Implementation:\r\n - [x] unit tests: all split configurations tested\r\n - [x] unit tests: multiple dtypes tested\r\n - [x] documentation updated where needed\r\n\r\n## Description\r\n\r\n<!--- Include a summary of the change/s.\r\nPlease also include relevant motivation and context. List any dependencies that are required for this change.\r\n--->\r\n\r\nIssue/s resolved: #1241 \r\n\r\n## Changes proposed:\r\n\r\n- upgrade to the latest version of checkout action\r\n- delete the token parameter such that the default action token is used\r\n\r\n## Type of change\r\n<!--\r\ni.e.\r\n- Bug fix (non-breaking change which fixes an issue)\r\n- New feature (non-breaking change which adds functionality)\r\n- Breaking change (fix or feature that would cause existing functionality to not work as expected)\r\n- Documentation update\r\n--->\r\n\r\n## Memory requirements\r\n<!--- Compare memory requirements to previous implementation / relevant torch operations if applicable:\r\n- in distributed and non-distributed mode\r\n- with `split=None` and `split not None`\r\n\r\nThis can be done using https://github.com/pythonprofilers/memory_profiler for CPU memory measurements,\r\nGPU measurements can be done with https://pytorch.org/docs/master/generated/torch.cuda.max_memory_allocated.html.\r\nThese tools only profile the memory used by each process, not the entire function.\r\n--->\r\n\r\n## Performance\r\n<!--- Compare performance to previous implementation / relevant torch operations if applicable:\r\n- in distributed and non-distributed mode\r\n- with `split=None` and `split not None`\r\n\r\nPython has an embedded profiler: https://docs.python.org/3.9/library/profile.html\r\nAgain, this will only profile the performance on each process. Printing the results with many processes\r\nmay be illegible. It may be easiest to save the output of each to a file.\r\n--->\r\n\r\n#### Does this change modify the behaviour of other functions? If so, which?\r\nno\r\n\n", "before_files": [{"content": "\"\"\"This module contains Heat's version information.\"\"\"\n\n\nmajor: int = 1\n\"\"\"Indicates Heat's main version.\"\"\"\nminor: int = 3\n\"\"\"Indicates feature extension.\"\"\"\nmicro: int = 0\n\"\"\"Indicates revisions for bugfixes.\"\"\"\nextension: str = \"dev\"\n\"\"\"Indicates special builds, e.g. for specific hardware.\"\"\"\n\nif not extension:\n __version__: str = f\"{major}.{minor}.{micro}\"\n \"\"\"The combined version string, consisting out of major, minor, micro and possibly extension.\"\"\"\nelse:\n __version__: str = f\"{major}.{minor}.{micro}-{extension}\"\n", "path": "heat/core/version.py"}]} | 1,192 | 97 |
gh_patches_debug_28436 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-4749 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
shapely hook doesn't work on windows
Using current develop, the shapely hook fails when it runs `binaries += [(os.path.join(lib_dir, f), '') for f in os.listdir(lib_dir)]`. `lib_dir` here equals `Lib/site-packages/shapely/DLLs`. The actual directory on my conda Python 3.6 installation is `Library/bin/`. My old spec file uses the following ugly code to copy these libraries over:
```
lib_dir = sys.executable.replace("python.exe", os.path.join("Library", "bin"))
binaries += [(os.path.join(lib_dir, 'geos_c.dll'), '')]
binaries += [(os.path.join(lib_dir, 'geos.dll'), '')]
binaries += [(os.path.join(lib_dir, 'mkl_*.dll'), '')]
```
Is there a better way to get hold of this Library directory with some PyInstaller utility function? Does anyone know if other Python environments (non-conda) have the directory used in the hook, or @durden, did you just guess on the Windows path?
Side issue: Shapely 1.6+ doesn't seem to work on at least Windows (haven't updated on other platforms). It fails to find the geos libraries mentioned above unless you execute the PyInstaller-made (Inno Setup packaged) executable from the install directory (`C:\Program Files (x86)\myprgm\bin\`). For now I'm just downgrading to 1.5.17.
</issue>
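On the question of locating the DLL without hard-coding a directory: the standard library's `ctypes.util.find_library` searches `PATH` on Windows, so one option is to search an augmented `PATH` and restore it afterwards. A sketch; the precedence order and helper signature are assumptions:

```python
import os
from ctypes.util import find_library

def find_geos_dll(pkg_dir, conda_prefix=None):
    # conda's Library/bin first when present, then shapely's bundled
    # DLLs directory, then whatever is already on PATH
    paths = [os.path.join(pkg_dir, 'DLLs'), os.environ.get('PATH', '')]
    if conda_prefix:
        paths.insert(0, os.path.join(conda_prefix, 'Library', 'bin'))
    original = os.environ.get('PATH', '')
    try:
        os.environ['PATH'] = os.pathsep.join(paths)
        return find_library('geos_c')  # None if nothing matched
    finally:
        os.environ['PATH'] = original
```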
<code>
[start of PyInstaller/hooks/hook-shapely.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2017-2020, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import os
13
14 from PyInstaller.utils.hooks import get_package_paths
15 from PyInstaller.utils.hooks import is_module_satisfies
16 from PyInstaller import compat
17
18 # Necessary when using the vectorized subpackage
19 hiddenimports = ['shapely.prepared']
20
21 pkg_base, pkg_dir = get_package_paths('shapely')
22
23
24 binaries = []
25 if compat.is_win:
26 if compat.is_conda:
27 lib_dir = os.path.join(compat.base_prefix, 'Library', 'bin')
28 else:
29 lib_dir = os.path.join(pkg_dir, 'DLLs')
30 dll_files = ['geos_c.dll', 'geos.dll']
31 binaries += [(os.path.join(lib_dir, f), '.') for f in dll_files]
32 elif compat.is_linux:
33 lib_dir = os.path.join(pkg_dir, '.libs')
34 dest_dir = os.path.join('shapely', '.libs')
35
36 # This duplicates the libgeos*.so* files in the build. PyInstaller will
37 # copy them into the root of the build by default, but shapely cannot load
38 # them from there in linux IF shapely was installed via a whl file. The
39 # whl bundles its own libgeos with a different name, something like
40 # libgeos_c-*.so.* but shapely tries to load libgeos_c.so if there isn't a
41 # ./libs directory under its package. There is a proposed fix for this in
42 # shapely but it has not been accepted yet:
43 # https://github.com/Toblerity/Shapely/pull/485
44 if is_module_satisfies('shapely <= 1.6'):
45 binaries += [(os.path.join(lib_dir, f), dest_dir) for f in os.listdir(lib_dir)]
46
[end of PyInstaller/hooks/hook-shapely.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-shapely.py b/PyInstaller/hooks/hook-shapely.py
--- a/PyInstaller/hooks/hook-shapely.py
+++ b/PyInstaller/hooks/hook-shapely.py
@@ -10,6 +10,7 @@
#-----------------------------------------------------------------------------
import os
+from ctypes.util import find_library
from PyInstaller.utils.hooks import get_package_paths
from PyInstaller.utils.hooks import is_module_satisfies
@@ -23,12 +24,25 @@
binaries = []
if compat.is_win:
+ # Search conda directory if conda is active, then search standard
+ # directory. This is the same order of precidence used in shapely.
+ standard_path = os.path.join(pkg_dir, 'DLLs')
+ lib_paths = [standard_path, os.environ['PATH']]
if compat.is_conda:
- lib_dir = os.path.join(compat.base_prefix, 'Library', 'bin')
- else:
- lib_dir = os.path.join(pkg_dir, 'DLLs')
- dll_files = ['geos_c.dll', 'geos.dll']
- binaries += [(os.path.join(lib_dir, f), '.') for f in dll_files]
+ conda_path = os.path.join(compat.base_prefix, 'Library', 'bin')
+ lib_paths.insert(0, conda_path)
+ original_path = os.environ['PATH']
+ try:
+ os.environ['PATH'] = os.pathsep.join(lib_paths)
+ dll_path = find_library('geos_c')
+ finally:
+ os.environ['PATH'] = original_path
+ if dll_path is None:
+ raise SystemExit(
+ "Error: geos_c.dll not found, required by hook-shapely.py.\n"
+ "Please check your installation or provide a pull request to "
+ "PyInstaller to update hook-shapely.py.")
+ binaries += [(dll_path, '.')]
elif compat.is_linux:
lib_dir = os.path.join(pkg_dir, '.libs')
dest_dir = os.path.join('shapely', '.libs')
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-shapely.py b/PyInstaller/hooks/hook-shapely.py\n--- a/PyInstaller/hooks/hook-shapely.py\n+++ b/PyInstaller/hooks/hook-shapely.py\n@@ -10,6 +10,7 @@\n #-----------------------------------------------------------------------------\n \n import os\n+from ctypes.util import find_library\n \n from PyInstaller.utils.hooks import get_package_paths\n from PyInstaller.utils.hooks import is_module_satisfies\n@@ -23,12 +24,25 @@\n \n binaries = []\n if compat.is_win:\n+ # Search conda directory if conda is active, then search standard\n+ # directory. This is the same order of precidence used in shapely.\n+ standard_path = os.path.join(pkg_dir, 'DLLs')\n+ lib_paths = [standard_path, os.environ['PATH']]\n if compat.is_conda:\n- lib_dir = os.path.join(compat.base_prefix, 'Library', 'bin')\n- else:\n- lib_dir = os.path.join(pkg_dir, 'DLLs')\n- dll_files = ['geos_c.dll', 'geos.dll']\n- binaries += [(os.path.join(lib_dir, f), '.') for f in dll_files]\n+ conda_path = os.path.join(compat.base_prefix, 'Library', 'bin')\n+ lib_paths.insert(0, conda_path)\n+ original_path = os.environ['PATH']\n+ try:\n+ os.environ['PATH'] = os.pathsep.join(lib_paths)\n+ dll_path = find_library('geos_c')\n+ finally:\n+ os.environ['PATH'] = original_path\n+ if dll_path is None:\n+ raise SystemExit(\n+ \"Error: geos_c.dll not found, required by hook-shapely.py.\\n\"\n+ \"Please check your installation or provide a pull request to \"\n+ \"PyInstaller to update hook-shapely.py.\")\n+ binaries += [(dll_path, '.')]\n elif compat.is_linux:\n lib_dir = os.path.join(pkg_dir, '.libs')\n dest_dir = os.path.join('shapely', '.libs')\n", "issue": "shapely hook doesn't work on windows\nUsing current develop, the shapely hook fails when it runs `binaries += [(os.path.join(lib_dir, f), '') for f in os.listdir(lib_dir)]`. `lib_dir` here equals `Lib/site-packages/shapely/DLLs`. The actual directory on my conda python 3.6 installation is `Library/bin/`. My old spec file uses the following ugly code to copy these libraries over:\r\n\r\n```\r\n lib_dir = sys.executable.replace(\"python.exe\", os.path.join(\"Library\", \"bin\"))\r\n binaries += [(os.path.join(lib_dir, 'geos_c.dll'), '')]\r\n binaries += [(os.path.join(lib_dir, 'geos.dll'), '')]\r\n binaries += [(os.path.join(lib_dir, 'mkl_*.dll'), '')]\r\n```\r\n\r\nIs there a better way to get a hold of this Library directory with some pyinstaller utility function? Does anyone know if other python environments (non-conda) have the directory used in the hook or @durden did you just guess on the Windows path?\r\n\r\nSide issue: Shapely 1.6+ doesn't seem to work on at least windows (haven't updated on other platforms). It fails to find the geos libraries mentioned above unless you execute the pyinstaller-made (inno setup packaged) executable from the install directory (`C:\\Program Files (x86)\\myprgm\\bin\\`). For now I'm just downgrading to 1.5.17.\nshapely hook doesn't work on windows\nUsing current develop, the shapely hook fails when it runs `binaries += [(os.path.join(lib_dir, f), '') for f in os.listdir(lib_dir)]`. `lib_dir` here equals `Lib/site-packages/shapely/DLLs`. The actual directory on my conda python 3.6 installation is `Library/bin/`. 
My old spec file uses the following ugly code to copy these libraries over:\r\n\r\n```\r\n lib_dir = sys.executable.replace(\"python.exe\", os.path.join(\"Library\", \"bin\"))\r\n binaries += [(os.path.join(lib_dir, 'geos_c.dll'), '')]\r\n binaries += [(os.path.join(lib_dir, 'geos.dll'), '')]\r\n binaries += [(os.path.join(lib_dir, 'mkl_*.dll'), '')]\r\n```\r\n\r\nIs there a better way to get a hold of this Library directory with some pyinstaller utility function? Does anyone know if other python environments (non-conda) have the directory used in the hook or @durden did you just guess on the Windows path?\r\n\r\nSide issue: Shapely 1.6+ doesn't seem to work on at least windows (haven't updated on other platforms). It fails to find the geos libraries mentioned above unless you execute the pyinstaller-made (inno setup packaged) executable from the install directory (`C:\\Program Files (x86)\\myprgm\\bin\\`). For now I'm just downgrading to 1.5.17.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2017-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport os\n\nfrom PyInstaller.utils.hooks import get_package_paths\nfrom PyInstaller.utils.hooks import is_module_satisfies\nfrom PyInstaller import compat\n\n# Necessary when using the vectorized subpackage\nhiddenimports = ['shapely.prepared']\n\npkg_base, pkg_dir = get_package_paths('shapely')\n\n\nbinaries = []\nif compat.is_win:\n if compat.is_conda:\n lib_dir = os.path.join(compat.base_prefix, 'Library', 'bin')\n else:\n lib_dir = os.path.join(pkg_dir, 'DLLs')\n dll_files = ['geos_c.dll', 'geos.dll']\n binaries += [(os.path.join(lib_dir, f), '.') for f in dll_files]\nelif compat.is_linux:\n lib_dir = os.path.join(pkg_dir, '.libs')\n dest_dir = os.path.join('shapely', '.libs')\n\n # This duplicates the libgeos*.so* files in the build. PyInstaller will\n # copy them into the root of the build by default, but shapely cannot load\n # them from there in linux IF shapely was installed via a whl file. The\n # whl bundles its' own libgeos with a different name, something like\n # libgeos_c-*.so.* but shapely tries to load libgeos_c.so if there isn't a\n # ./libs directory under its' package. There is a proposed fix for this in\n # shapely but it has not been accepted it:\n # https://github.com/Toblerity/Shapely/pull/485\n if is_module_satisfies('shapely <= 1.6'):\n binaries += [(os.path.join(lib_dir, f), dest_dir) for f in os.listdir(lib_dir)]\n", "path": "PyInstaller/hooks/hook-shapely.py"}]} | 1,748 | 467 |
gh_patches_debug_4112 | rasdani/github-patches | git_diff | getsentry__sentry-12417 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Google Pubsub push messages mis-identified as crawler
## Important Details
How are you running Sentry?
* [ ] On-Premise docker [Version xyz]
* [x] Saas (sentry.io)
* [ ] Other [briefly describe your environment]
## Description
We get the Sentry API error `Sentry responded with an API error: APIError(Event dropped due to filter: web-crawlers)` when there's an exception in a [Google Pubsub push](https://cloud.google.com/pubsub/docs/push) handler.
Apparently the user agent is `APIs-Google`.
## Steps to Reproduce
1. Set up a Google Pubsub push HTTP event handler
2. Have an exception in the message handler code
3. Not get report in Sentry
### What you expected to happen
`APIs-Google` isn't identified as a web crawler.
### Possible Solution
Improve the regex? 😸
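One regex-level option is a negative lookbehind so that `Google` no longer matches when it is part of `APIs-Google`. A quick sketch (the user-agent strings below are just illustrative):
```
import re

pattern = re.compile(r'(?<!APIs-)Google', re.I)

assert pattern.search('Mozilla/5.0 (compatible; Googlebot/2.1)')
assert pattern.search('Google-Site-Verification/1.0')
assert not pattern.search('APIs-Google')  # the Pubsub push user agent
```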
</issue>
<code>
[start of src/sentry/filters/web_crawlers.py]
1 from __future__ import absolute_import
2
3 import re
4
5 from .base import Filter
6 from sentry.utils.data_filters import FilterStatKeys
7 from sentry.utils.safe import get_path
8
9 # not all of these agents are guaranteed to execute JavaScript, but to avoid
10 # overhead of identifying which ones do, and which ones will over time we simply
11 # target all of the major ones
12 CRAWLERS = re.compile(
13 r'|'.join(
14 (
15 # various Google services
16 r'AdsBot',
17 # Google Adsense
18 r'Mediapartners',
19 # Google+ and Google web search
20 r'Google',
21 # Bing search
22 r'BingBot',
23 r'BingPreview',
24 # Baidu search
25 r'Baiduspider',
26 # Yahoo
27 r'Slurp',
28 # Sogou
29 r'Sogou',
30 # facebook
31 r'facebook',
32 # Alexa
33 r'ia_archiver',
34 # Generic bot
35 r'bots?[\/\s\)\;]',
36 # Generic spider
37 r'spider[\/\s\)\;]',
38 # Slack - see https://api.slack.com/robots
39 r'Slack',
40 # Google indexing bot
41 r'Calypso AppCrawler',
42 )
43 ),
44 re.I
45 )
46
47
48 class WebCrawlersFilter(Filter):
49 id = FilterStatKeys.WEB_CRAWLER
50 name = 'Filter out known web crawlers'
51 description = 'Some crawlers may execute pages in incompatible ways which then cause errors that are unlikely to be seen by a normal user.'
52 default = True
53
54 def get_user_agent(self, data):
55 try:
56 for key, value in get_path(data, 'request', 'headers', filter=True) or ():
57 if key.lower() == 'user-agent':
58 return value
59 except LookupError:
60 return ''
61
62 def test(self, data):
63 # TODO(dcramer): we could also look at UA parser and use the 'Spider'
64 # device type
65 user_agent = self.get_user_agent(data)
66 if not user_agent:
67 return False
68 return bool(CRAWLERS.search(user_agent))
69
[end of src/sentry/filters/web_crawlers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/filters/web_crawlers.py b/src/sentry/filters/web_crawlers.py
--- a/src/sentry/filters/web_crawlers.py
+++ b/src/sentry/filters/web_crawlers.py
@@ -16,8 +16,8 @@
r'AdsBot',
# Google Adsense
r'Mediapartners',
- # Google+ and Google web search
- r'Google',
+ # Google+ and Google web search, but not apis-google
+ r'(?<!APIs-)Google',
# Bing search
r'BingBot',
r'BingPreview',
| {"golden_diff": "diff --git a/src/sentry/filters/web_crawlers.py b/src/sentry/filters/web_crawlers.py\n--- a/src/sentry/filters/web_crawlers.py\n+++ b/src/sentry/filters/web_crawlers.py\n@@ -16,8 +16,8 @@\n r'AdsBot',\n # Google Adsense\n r'Mediapartners',\n- # Google+ and Google web search\n- r'Google',\n+ # Google+ and Google web search, but not apis-google\n+ r'(?<!APIs-)Google',\n # Bing search\n r'BingBot',\n r'BingPreview',\n", "issue": "Google Pubsub push messages mis-identified as crawler\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n* [ ] On-Premise docker [Version xyz]\r\n* [x] Saas (sentry.io)\r\n* [ ] Other [briefly describe your environment]\r\n\r\n## Description\r\n\r\nWe get the Sentry API error `Sentry responded with an API error: APIError(Event dropped due to filter: web-crawlers)` when there's an exception in a [Google Pubsub push](https://cloud.google.com/pubsub/docs/push) handler.\r\n\r\nApparently the user agent is `APIs-Google`.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Set up a Google Pubsub push HTTP event handler\r\n2. Have an exception in the message handler code\r\n3. Not get report in Sentry\r\n\r\n### What you expected to happen\r\n\r\n`APIs-Google` isn't identified as a web crawler.\r\n\r\n### Possible Solution\r\n\r\nImprove the regex? \ud83d\ude38 \r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport re\n\nfrom .base import Filter\nfrom sentry.utils.data_filters import FilterStatKeys\nfrom sentry.utils.safe import get_path\n\n# not all of these agents are guaranteed to execute JavaScript, but to avoid\n# overhead of identifying which ones do, and which ones will over time we simply\n# target all of the major ones\nCRAWLERS = re.compile(\n r'|'.join(\n (\n # various Google services\n r'AdsBot',\n # Google Adsense\n r'Mediapartners',\n # Google+ and Google web search\n r'Google',\n # Bing search\n r'BingBot',\n r'BingPreview',\n # Baidu search\n r'Baiduspider',\n # Yahoo\n r'Slurp',\n # Sogou\n r'Sogou',\n # facebook\n r'facebook',\n # Alexa\n r'ia_archiver',\n # Generic bot\n r'bots?[\\/\\s\\)\\;]',\n # Generic spider\n r'spider[\\/\\s\\)\\;]',\n # Slack - see https://api.slack.com/robots\n r'Slack',\n # Google indexing bot\n r'Calypso AppCrawler',\n )\n ),\n re.I\n)\n\n\nclass WebCrawlersFilter(Filter):\n id = FilterStatKeys.WEB_CRAWLER\n name = 'Filter out known web crawlers'\n description = 'Some crawlers may execute pages in incompatible ways which then cause errors that are unlikely to be seen by a normal user.'\n default = True\n\n def get_user_agent(self, data):\n try:\n for key, value in get_path(data, 'request', 'headers', filter=True) or ():\n if key.lower() == 'user-agent':\n return value\n except LookupError:\n return ''\n\n def test(self, data):\n # TODO(dcramer): we could also look at UA parser and use the 'Spider'\n # device type\n user_agent = self.get_user_agent(data)\n if not user_agent:\n return False\n return bool(CRAWLERS.search(user_agent))\n", "path": "src/sentry/filters/web_crawlers.py"}]} | 1,349 | 144 |
gh_patches_debug_30232 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5315 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/kernel/triton/rms_layernorm.py]
1 import torch
2
3 try:
4 import triton
5 import triton.language as tl
6
7 HAS_TRITON = True
8 except ImportError:
9 HAS_TRITON = False
10 print("please install triton from https://github.com/openai/triton")
11
12 if HAS_TRITON:
13 # CREDITS: These functions are adapted from the Triton tutorial
14 # https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
15
16 @triton.jit
17 def _rmsnorm_kernel(
18 X, # pointer to the input
19 Y, # pointer to the output
20 W, # pointer to the weights
21 stride, # how much to increase the pointer when moving by 1 row
22 N, # number of columns in X
23 eps, # epsilon to avoid division by zero
24 BLOCK_SIZE: tl.constexpr,
25 ):
26
27 # This triton kernel implements Root Mean Square Layer Norm (RMSNorm).
28
29 # Map the program id to the row of X and Y it should compute.
30 row = tl.program_id(0)
31 Y += row * stride
32 X += row * stride
33 # Compute variance
34 _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
35 for off in range(0, N, BLOCK_SIZE):
36 cols = off + tl.arange(0, BLOCK_SIZE)
37 x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
38 x = tl.where(cols < N, x, 0.0)
39 _var += x * x
40 var = tl.sum(_var, axis=0) / N
41 rstd = 1 / tl.sqrt(var + eps)
42 # Normalize and apply linear transformation
43 for off in range(0, N, BLOCK_SIZE):
44 cols = off + tl.arange(0, BLOCK_SIZE)
45 mask = cols < N
46 w = tl.load(W + cols, mask=mask)
47 x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32)
48 x_hat = x * rstd
49 y = x_hat * w
50 # Write output
51 tl.store(Y + cols, y.to(tl.float16), mask=mask)
52
53 @torch.no_grad()
54 def rms_layernorm(x, weight, eps):
55 # allocate output
56 y = torch.empty_like(x)
57 # reshape input data into 2D tensor
58 x_arg = x.reshape(-1, x.shape[-1])
59 M, N = x_arg.shape
60 # Less than 64KB per feature: enqueue fused kernel
61 MAX_FUSED_SIZE = 65536 // x.element_size()
62 BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
63 if N > BLOCK_SIZE:
64 raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
65 # heuristics for number of warps
66 num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
67 # enqueue kernel
68 _rmsnorm_kernel[(M,)](
69 x_arg, y, weight, x_arg.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps
70 )
71 return y
72
[end of colossalai/kernel/triton/rms_layernorm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/kernel/triton/rms_layernorm.py b/colossalai/kernel/triton/rms_layernorm.py
--- a/colossalai/kernel/triton/rms_layernorm.py
+++ b/colossalai/kernel/triton/rms_layernorm.py
@@ -23,7 +23,6 @@
eps, # epsilon to avoid division by zero
BLOCK_SIZE: tl.constexpr,
):
-
# This triton kernel implements Root Mean Square Layer Norm (RMSNorm).
# Map the program id to the row of X and Y it should compute.
@@ -54,18 +53,19 @@
def rms_layernorm(x, weight, eps):
# allocate output
y = torch.empty_like(x)
- # reshape input data into 2D tensor
+ # reshape input data into 2D tensor, (total token, hidden_size)
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
+
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
- if N > BLOCK_SIZE:
+ if N > MAX_FUSED_SIZE:
raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
+
# heuristics for number of warps
- num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
+ num_warps = min(max(triton.next_power_of_2(N) // 256, 8), 32)
+
# enqueue kernel
- _rmsnorm_kernel[(M,)](
- x_arg, y, weight, x_arg.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps
- )
+ _rmsnorm_kernel[(M,)](x_arg, y, weight, x_arg.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps)
return y
| {"golden_diff": "diff --git a/colossalai/kernel/triton/rms_layernorm.py b/colossalai/kernel/triton/rms_layernorm.py\n--- a/colossalai/kernel/triton/rms_layernorm.py\n+++ b/colossalai/kernel/triton/rms_layernorm.py\n@@ -23,7 +23,6 @@\n eps, # epsilon to avoid division by zero\n BLOCK_SIZE: tl.constexpr,\n ):\n-\n # This triton kernel implements Root Mean Square Layer Norm (RMSNorm).\n \n # Map the program id to the row of X and Y it should compute.\n@@ -54,18 +53,19 @@\n def rms_layernorm(x, weight, eps):\n # allocate output\n y = torch.empty_like(x)\n- # reshape input data into 2D tensor\n+ # reshape input data into 2D tensor, (total token, hidden_size)\n x_arg = x.reshape(-1, x.shape[-1])\n M, N = x_arg.shape\n # Less than 64KB per feature: enqueue fused kernel\n MAX_FUSED_SIZE = 65536 // x.element_size()\n+\n BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))\n- if N > BLOCK_SIZE:\n+ if N > MAX_FUSED_SIZE:\n raise RuntimeError(\"This layer norm doesn't support feature dim >= 64KB.\")\n+\n # heuristics for number of warps\n- num_warps = min(max(BLOCK_SIZE // 256, 1), 8)\n+ num_warps = min(max(triton.next_power_of_2(N) // 256, 8), 32)\n+\n # enqueue kernel\n- _rmsnorm_kernel[(M,)](\n- x_arg, y, weight, x_arg.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps\n- )\n+ _rmsnorm_kernel[(M,)](x_arg, y, weight, x_arg.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps)\n return y\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import torch\n\ntry:\n import triton\n import triton.language as tl\n\n HAS_TRITON = True\nexcept ImportError:\n HAS_TRITON = False\n print(\"please install triton from https://github.com/openai/triton\")\n\nif HAS_TRITON:\n # CREDITS: These functions are adapted from the Triton tutorial\n # https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html\n\n @triton.jit\n def _rmsnorm_kernel(\n X, # pointer to the input\n Y, # pointer to the output\n W, # pointer to the weights\n stride, # how much to increase the pointer when moving by 1 row\n N, # number of columns in X\n eps, # epsilon to avoid division by zero\n BLOCK_SIZE: tl.constexpr,\n ):\n\n # This triton kernel implements Root Mean Square Layer Norm (RMSNorm).\n\n # Map the program id to the row of X and Y it should compute.\n row = tl.program_id(0)\n Y += row * stride\n X += row * stride\n # Compute variance\n _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)\n for off in range(0, N, BLOCK_SIZE):\n cols = off + tl.arange(0, BLOCK_SIZE)\n x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)\n x = tl.where(cols < N, x, 0.0)\n _var += x * x\n var = tl.sum(_var, axis=0) / N\n rstd = 1 / tl.sqrt(var + eps)\n # Normalize and apply linear transformation\n for off in range(0, N, BLOCK_SIZE):\n cols = off + tl.arange(0, BLOCK_SIZE)\n mask = cols < N\n w = tl.load(W + cols, mask=mask)\n x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32)\n x_hat = x * rstd\n y = x_hat * w\n # Write output\n tl.store(Y + cols, y.to(tl.float16), mask=mask)\n\n @torch.no_grad()\n def rms_layernorm(x, weight, eps):\n # allocate output\n y = torch.empty_like(x)\n # reshape input data into 2D tensor\n x_arg = x.reshape(-1, x.shape[-1])\n M, N = x_arg.shape\n # Less than 64KB per feature: enqueue fused kernel\n MAX_FUSED_SIZE = 65536 // x.element_size()\n BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))\n if N > BLOCK_SIZE:\n raise RuntimeError(\"This layer norm doesn't support feature 
dim >= 64KB.\")\n # heuristics for number of warps\n num_warps = min(max(BLOCK_SIZE // 256, 1), 8)\n # enqueue kernel\n _rmsnorm_kernel[(M,)](\n x_arg, y, weight, x_arg.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps\n )\n return y\n", "path": "colossalai/kernel/triton/rms_layernorm.py"}]} | 1,443 | 483 |
gh_patches_debug_12269 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement schema list page
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
Users should be able to create a new schema, edit schema names, and delete schemas.
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
We should provide a way to do these actions from the UI using the schema list page introduced in the [design spec](https://wiki.mathesar.org/en/design/specs/schemas).
**Additional context**
<!-- Add any other context or screenshots about the feature request here.-->
- #166
- #168
- #170
- #393
</issue>
<code>
[start of mathesar/urls.py]
1 from django.urls import include, path
2 from rest_framework_nested import routers
3
4 from mathesar.views import api, frontend
5
6
7 router = routers.DefaultRouter()
8 router.register(r'tables', api.TableViewSet, basename='table')
9 router.register(r'schemas', api.SchemaViewSet, basename='schema')
10 router.register(r'database_keys', api.DatabaseKeyViewSet, basename='database-key')
11 router.register(r'databases', api.DatabaseViewSet, basename='database')
12 router.register(r'data_files', api.DataFileViewSet, basename='data-file')
13
14 table_router = routers.NestedSimpleRouter(router, r'tables', lookup='table')
15 table_router.register(r'records', api.RecordViewSet, basename='table-record')
16 table_router.register(r'columns', api.ColumnViewSet, basename='table-column')
17
18 urlpatterns = [
19 path('', frontend.index, name="index"),
20 path('api/v0/', include(router.urls)),
21 path('api/v0/', include(table_router.urls)),
22 # TODO: Handle known urls like /favicon.ico etc.,
23 # Currenty, this catches all
24 path('<dbname>', frontend.index, name="index"),
25 ]
26
[end of mathesar/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/urls.py b/mathesar/urls.py
--- a/mathesar/urls.py
+++ b/mathesar/urls.py
@@ -1,4 +1,4 @@
-from django.urls import include, path
+from django.urls import include, path, re_path
from rest_framework_nested import routers
from mathesar.views import api, frontend
@@ -20,6 +20,6 @@
path('api/v0/', include(router.urls)),
path('api/v0/', include(table_router.urls)),
# TODO: Handle known urls like /favicon.ico etc.,
- # Currenty, this catches all
- path('<dbname>', frontend.index, name="index"),
+ # Currently, this catches all
+ re_path(r'(?P<dbname>\w+)/.*$', frontend.index, name="index"),
]
| {"golden_diff": "diff --git a/mathesar/urls.py b/mathesar/urls.py\n--- a/mathesar/urls.py\n+++ b/mathesar/urls.py\n@@ -1,4 +1,4 @@\n-from django.urls import include, path\n+from django.urls import include, path, re_path\n from rest_framework_nested import routers\n \n from mathesar.views import api, frontend\n@@ -20,6 +20,6 @@\n path('api/v0/', include(router.urls)),\n path('api/v0/', include(table_router.urls)),\n # TODO: Handle known urls like /favicon.ico etc.,\n- # Currenty, this catches all\n- path('<dbname>', frontend.index, name=\"index\"),\n+ # Currently, this catches all\n+ re_path(r'(?P<dbname>\\w+)/.*$', frontend.index, name=\"index\"),\n ]\n", "issue": "Implement schema list page\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers should be able to create a new schema, edit schema names, and delete schemas.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nWe should provide a way to do these actions from the UI using the schema list page introduced in the [design spec](https://wiki.mathesar.org/en/design/specs/schemas).\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n- #166\r\n- #168 \r\n- #170\r\n- #393\n", "before_files": [{"content": "from django.urls import include, path\nfrom rest_framework_nested import routers\n\nfrom mathesar.views import api, frontend\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'tables', api.TableViewSet, basename='table')\nrouter.register(r'schemas', api.SchemaViewSet, basename='schema')\nrouter.register(r'database_keys', api.DatabaseKeyViewSet, basename='database-key')\nrouter.register(r'databases', api.DatabaseViewSet, basename='database')\nrouter.register(r'data_files', api.DataFileViewSet, basename='data-file')\n\ntable_router = routers.NestedSimpleRouter(router, r'tables', lookup='table')\ntable_router.register(r'records', api.RecordViewSet, basename='table-record')\ntable_router.register(r'columns', api.ColumnViewSet, basename='table-column')\n\nurlpatterns = [\n path('', frontend.index, name=\"index\"),\n path('api/v0/', include(router.urls)),\n path('api/v0/', include(table_router.urls)),\n # TODO: Handle known urls like /favicon.ico etc.,\n # Currenty, this catches all\n path('<dbname>', frontend.index, name=\"index\"),\n]\n", "path": "mathesar/urls.py"}]} | 959 | 182 |
gh_patches_debug_41643 | rasdani/github-patches | git_diff | microsoft__Qcodes-1171 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keithley 2400 does not get added to the station cleanly
The ":read:" command (and possibly others) does not work when output is off; instead it fails with an error. It is called when getting volt and current, which happens whenever the instrument is snapshotted.
We should wrap these calls in a check that output is on before reading.
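A rough sketch of what that wrapper could look like (names are illustrative; `output` is the driver's existing on/off parameter):
```
def _get_read_output_protected(self):
    # ':READ?' errors out when output is disabled, so check first.
    output = self.output.get_latest()
    if output is None:
        # No cached value yet; ask the instrument directly.
        output = self.output.get()
    if output != 1:
        raise RuntimeError("Cannot perform read with output off")
    return self.ask(':READ?')
```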
</issue>
<code>
[start of qcodes/instrument_drivers/tektronix/Keithley_2400.py]
1 from qcodes import VisaInstrument
2 from qcodes.utils.validators import Strings, Enum
3
4
5 class Keithley_2400(VisaInstrument):
6 """
7 QCoDeS driver for the Keithley 2400 voltage source.
8 """
9 def __init__(self, name, address, **kwargs):
10 super().__init__(name, address, terminator='\n', **kwargs)
11
12 self.add_parameter('rangev',
13 get_cmd='SENS:VOLT:RANG?',
14 get_parser=float,
15 set_cmd='SOUR:VOLT:RANG {:f}',
16 label='Voltage range')
17
18 self.add_parameter('rangei',
19 get_cmd='SENS:CURR:RANG?',
20 get_parser=float,
21 set_cmd='SOUR:CURR:RANG {:f}',
22 label='Current range')
23
24 self.add_parameter('compliancev',
25 get_cmd='SENS:VOLT:PROT?',
26 get_parser=float,
27 set_cmd='SENS:VOLT:PROT {:f}',
28 label='Voltage Compliance')
29
30 self.add_parameter('compliancei',
31 get_cmd='SENS:CURR:PROT?',
32 get_parser=float,
33 set_cmd='SENS:CURR:PROT {:f}',
34 label='Current Compliance')
35
36 self.add_parameter('volt',
37 get_cmd=':READ?',
38 get_parser=self._volt_parser,
39 set_cmd=':SOUR:VOLT:LEV {:.8f}',
40 label='Voltage',
41 unit='V')
42
43 self.add_parameter('curr',
44 get_cmd=':READ?',
45 get_parser=self._curr_parser,
46 set_cmd=':SOUR:CURR:LEV {:.8f}',
47 label='Current',
48 unit='A')
49
50 self.add_parameter('mode',
51 vals=Enum('VOLT', 'CURR'),
52 get_cmd=':SOUR:FUNC?',
53 set_cmd=self._set_mode_and_sense,
54 label='Mode')
55
56 self.add_parameter('sense',
57 vals=Strings(),
58 get_cmd=':SENS:FUNC?',
59 set_cmd=':SENS:FUNC "{:s}"',
60 label='Sense mode')
61
62 self.add_parameter('output',
63 get_parser=int,
64 set_cmd=':OUTP:STAT {:d}',
65 get_cmd=':OUTP:STAT?')
66
67 self.add_parameter('nplcv',
68 get_cmd='SENS:VOLT:NPLC?',
69 get_parser=float,
70 set_cmd='SENS:VOLT:NPLC {:f}',
71 label='Voltage integration time')
72
73 self.add_parameter('nplci',
74 get_cmd='SENS:CURR:NPLC?',
75 get_parser=float,
76 set_cmd='SENS:CURR:NPLC {:f}',
77 label='Current integration time')
78
79 self.add_parameter('resistance',
80 get_cmd=':READ?',
81 get_parser=self._resistance_parser,
82 label='Resistance',
83 unit='Ohm')
84
85 def _set_mode_and_sense(self, msg):
86 # This helps set the correct read out curr/volt
87 if msg == 'VOLT':
88 self.sense('CURR')
89 elif msg == 'CURR':
90 self.sense('VOLT')
91 else:
92 raise AttributeError('Mode does not exist')
93 self.write(':SOUR:FUNC {:s}'.format(msg))
94
95 def reset(self):
96 """
97 Reset the instrument. When the instrument is reset, it performs the
98 following actions.
99
100 Returns the SourceMeter to the GPIB default conditions.
101
102 Cancels all pending commands.
103
104 Cancels all previously send `*OPC` and `*OPC?`
105 """
106 self.write(':*RST')
107
108 def _volt_parser(self, msg):
109 fields = [float(x) for x in msg.split(',')]
110 return fields[0]
111
112 def _curr_parser(self, msg):
113 fields = [float(x) for x in msg.split(',')]
114 return fields[1]
115
116 def _resistance_parser(self, msg):
117 fields = [float(x) for x in msg.split(',')]
118 return fields[0]/fields[1]
119
[end of qcodes/instrument_drivers/tektronix/Keithley_2400.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2400.py b/qcodes/instrument_drivers/tektronix/Keithley_2400.py
--- a/qcodes/instrument_drivers/tektronix/Keithley_2400.py
+++ b/qcodes/instrument_drivers/tektronix/Keithley_2400.py
@@ -34,18 +34,31 @@
label='Current Compliance')
self.add_parameter('volt',
- get_cmd=':READ?',
+ get_cmd=self._get_read_output_protected,
get_parser=self._volt_parser,
set_cmd=':SOUR:VOLT:LEV {:.8f}',
label='Voltage',
- unit='V')
+ unit='V',
+ docstring="Sets voltage in 'VOLT' mode. "
+ "Get returns measured voltage if "
+ "sensing 'VOLT' otherwise it returns "
+ "setpoint value. "
+ "Note that it is an error to read voltage with "
+ "output off")
self.add_parameter('curr',
- get_cmd=':READ?',
+ get_cmd=self._get_read_output_protected,
get_parser=self._curr_parser,
set_cmd=':SOUR:CURR:LEV {:.8f}',
label='Current',
- unit='A')
+ unit='A',
+ docstring = "Sets current in 'CURR' mode. "
+ "Get returns measured current if "
+ "sensing 'CURR' otherwise it returns "
+ "setpoint value. "
+ "Note that it is an error to read current with "
+ "output off")
+
self.add_parameter('mode',
vals=Enum('VOLT', 'CURR'),
@@ -77,10 +90,32 @@
label='Current integration time')
self.add_parameter('resistance',
- get_cmd=':READ?',
+ get_cmd=self._get_read_output_protected,
get_parser=self._resistance_parser,
label='Resistance',
- unit='Ohm')
+ unit='Ohm',
+ docstring="Measure resistance from current and voltage "
+ "Note that it is an error to read current "
+ "and voltage with output off")
+
+ def _get_read_output_protected(self) -> str:
+ """
+ This wrapper function around ":READ?" exists because calling
+ ":READ?" on an instrument with output disabled is an error.
+ So first we check that output is on and if not we return
+ nan for volt, curr etc.
+ """
+ output = self.output.get_latest()
+ if output is None:
+ # if get_latest returns None we have
+ # to ask the instrument for the status of output
+ output = self.output.get()
+
+ if output == 1:
+ msg = self.ask(':READ?')
+ else:
+ raise RuntimeError("Cannot perform read with output off")
+ return msg
def _set_mode_and_sense(self, msg):
# This helps set the correct read out curr/volt
@@ -115,4 +150,5 @@
def _resistance_parser(self, msg):
fields = [float(x) for x in msg.split(',')]
- return fields[0]/fields[1]
+ res = fields[0] / fields[1]
+ return res
| {"golden_diff": "diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2400.py b/qcodes/instrument_drivers/tektronix/Keithley_2400.py\n--- a/qcodes/instrument_drivers/tektronix/Keithley_2400.py\n+++ b/qcodes/instrument_drivers/tektronix/Keithley_2400.py\n@@ -34,18 +34,31 @@\n label='Current Compliance')\n \n self.add_parameter('volt',\n- get_cmd=':READ?',\n+ get_cmd=self._get_read_output_protected,\n get_parser=self._volt_parser,\n set_cmd=':SOUR:VOLT:LEV {:.8f}',\n label='Voltage',\n- unit='V')\n+ unit='V',\n+ docstring=\"Sets voltage in 'VOLT' mode. \"\n+ \"Get returns measured voltage if \"\n+ \"sensing 'VOLT' otherwise it returns \"\n+ \"setpoint value. \"\n+ \"Note that it is an error to read voltage with \"\n+ \"output off\")\n \n self.add_parameter('curr',\n- get_cmd=':READ?',\n+ get_cmd=self._get_read_output_protected,\n get_parser=self._curr_parser,\n set_cmd=':SOUR:CURR:LEV {:.8f}',\n label='Current',\n- unit='A')\n+ unit='A',\n+ docstring = \"Sets current in 'CURR' mode. \"\n+ \"Get returns measured current if \"\n+ \"sensing 'CURR' otherwise it returns \"\n+ \"setpoint value. \"\n+ \"Note that it is an error to read current with \"\n+ \"output off\")\n+\n \n self.add_parameter('mode',\n vals=Enum('VOLT', 'CURR'),\n@@ -77,10 +90,32 @@\n label='Current integration time')\n \n self.add_parameter('resistance',\n- get_cmd=':READ?',\n+ get_cmd=self._get_read_output_protected,\n get_parser=self._resistance_parser,\n label='Resistance',\n- unit='Ohm')\n+ unit='Ohm',\n+ docstring=\"Measure resistance from current and voltage \"\n+ \"Note that it is an error to read current \"\n+ \"and voltage with output off\")\n+\n+ def _get_read_output_protected(self) -> str:\n+ \"\"\"\n+ This wrapper function around \":READ?\" exists because calling\n+ \":READ?\" on an instrument with output disabled is an error.\n+ So first we check that output is on and if not we return\n+ nan for volt, curr etc.\n+ \"\"\"\n+ output = self.output.get_latest()\n+ if output is None:\n+ # if get_latest returns None we have\n+ # to ask the instrument for the status of output\n+ output = self.output.get()\n+\n+ if output == 1:\n+ msg = self.ask(':READ?')\n+ else:\n+ raise RuntimeError(\"Cannot perform read with output off\")\n+ return msg\n \n def _set_mode_and_sense(self, msg):\n # This helps set the correct read out curr/volt\n@@ -115,4 +150,5 @@\n \n def _resistance_parser(self, msg):\n fields = [float(x) for x in msg.split(',')]\n- return fields[0]/fields[1]\n+ res = fields[0] / fields[1]\n+ return res\n", "issue": "Keithley 2400 does not get added to the station cleanly\nThe \":read:\" command and possibly others does not work when output is off but fails with an error. 
This is called when getting volt and current are snapshotted \r\n\r\nWe should wrap these calls in checking that output is off\n", "before_files": [{"content": "from qcodes import VisaInstrument\nfrom qcodes.utils.validators import Strings, Enum\n\n\nclass Keithley_2400(VisaInstrument):\n \"\"\"\n QCoDeS driver for the Keithley 2400 voltage source.\n \"\"\"\n def __init__(self, name, address, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n\n self.add_parameter('rangev',\n get_cmd='SENS:VOLT:RANG?',\n get_parser=float,\n set_cmd='SOUR:VOLT:RANG {:f}',\n label='Voltage range')\n\n self.add_parameter('rangei',\n get_cmd='SENS:CURR:RANG?',\n get_parser=float,\n set_cmd='SOUR:CURR:RANG {:f}',\n label='Current range')\n\n self.add_parameter('compliancev',\n get_cmd='SENS:VOLT:PROT?',\n get_parser=float,\n set_cmd='SENS:VOLT:PROT {:f}',\n label='Voltage Compliance')\n\n self.add_parameter('compliancei',\n get_cmd='SENS:CURR:PROT?',\n get_parser=float,\n set_cmd='SENS:CURR:PROT {:f}',\n label='Current Compliance')\n\n self.add_parameter('volt',\n get_cmd=':READ?',\n get_parser=self._volt_parser,\n set_cmd=':SOUR:VOLT:LEV {:.8f}',\n label='Voltage',\n unit='V')\n\n self.add_parameter('curr',\n get_cmd=':READ?',\n get_parser=self._curr_parser,\n set_cmd=':SOUR:CURR:LEV {:.8f}',\n label='Current',\n unit='A')\n\n self.add_parameter('mode',\n vals=Enum('VOLT', 'CURR'),\n get_cmd=':SOUR:FUNC?',\n set_cmd=self._set_mode_and_sense,\n label='Mode')\n\n self.add_parameter('sense',\n vals=Strings(),\n get_cmd=':SENS:FUNC?',\n set_cmd=':SENS:FUNC \"{:s}\"',\n label='Sense mode')\n\n self.add_parameter('output',\n get_parser=int,\n set_cmd=':OUTP:STAT {:d}',\n get_cmd=':OUTP:STAT?')\n\n self.add_parameter('nplcv',\n get_cmd='SENS:VOLT:NPLC?',\n get_parser=float,\n set_cmd='SENS:VOLT:NPLC {:f}',\n label='Voltage integration time')\n\n self.add_parameter('nplci',\n get_cmd='SENS:CURR:NPLC?',\n get_parser=float,\n set_cmd='SENS:CURR:NPLC {:f}',\n label='Current integration time')\n\n self.add_parameter('resistance',\n get_cmd=':READ?',\n get_parser=self._resistance_parser,\n label='Resistance',\n unit='Ohm')\n\n def _set_mode_and_sense(self, msg):\n # This helps set the correct read out curr/volt\n if msg == 'VOLT':\n self.sense('CURR')\n elif msg == 'CURR':\n self.sense('VOLT')\n else:\n raise AttributeError('Mode does not exist')\n self.write(':SOUR:FUNC {:s}'.format(msg))\n\n def reset(self):\n \"\"\"\n Reset the instrument. When the instrument is reset, it performs the\n following actions.\n\n Returns the SourceMeter to the GPIB default conditions.\n\n Cancels all pending commands.\n\n Cancels all previously send `*OPC` and `*OPC?`\n \"\"\"\n self.write(':*RST')\n\n def _volt_parser(self, msg):\n fields = [float(x) for x in msg.split(',')]\n return fields[0]\n\n def _curr_parser(self, msg):\n fields = [float(x) for x in msg.split(',')]\n return fields[1]\n\n def _resistance_parser(self, msg):\n fields = [float(x) for x in msg.split(',')]\n return fields[0]/fields[1]\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2400.py"}]} | 1,756 | 766 |
gh_patches_debug_17736 | rasdani/github-patches | git_diff | beeware__toga-31 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"ImportError: cannot import name WebKit" on Ubuntu 14.04
Installed toga via global `sudo pip install toga`. Then, tried to import it:
```
>>> import toga
ERROR:root:Could not find any typelib for WebKit
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/toga/__init__.py", line 86, in <module>
from .platform.gtk.app import *
File "/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/app.py", line 7, in <module>
from .window import Window
File "/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/window.py", line 6, in <module>
from .command import SEPARATOR, SPACER, EXPANDING_SPACER
File "/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/command.py", line 1, in <module>
from .widgets import Icon
File "/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/widgets/__init__.py", line 17, in <module>
from .webview import WebView
File "/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/widgets/webview.py", line 3, in <module>
from gi.repository import Gtk, WebKit
ImportError: cannot import name WebKit
```
Did a `sudo apt-get install python-webkit`, but still getting the same import error. I'm running Ubuntu under Crouton on a Chromebook, which doesn't always contain the full set of packages.
Since the application I aim to create (a GUI launcher for [KA Lite](https://github.com/learningequality/ka-lite/)) would rely on toga's awesome dedication to being pure Python and not needing any extra packages to be installed to work cross-platform, and since we wouldn't be needing the WebView, would it be possible to have it handle a lack of WebKit more gracefully, only erroring out if a WebView was actually used? Thanks!
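One way to implement that (a sketch, not a tested patch) is to catch the failing import at module load and only raise when a `WebView` is actually constructed or started:
```
try:
    from gi.repository import WebKit
except ImportError:
    WebKit = None  # typelib missing; only a problem if WebView is used

def _require_webkit():
    if WebKit is None:
        raise RuntimeError(
            "Import 'from gi.repository import WebKit' failed; "
            "you may need to install gir1.2-webkit-3.0 or similar.")
```
`WebView._startup()` could then call `_require_webkit()` before touching `WebKit.WebView()`.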
</issue>
<code>
[start of toga/platform/gtk/widgets/webview.py]
1 from __future__ import print_function, absolute_import, division
2
3 from gi.repository import Gtk, WebKit
4
5 from .base import Widget
6
7
8 class WebView(Widget):
9 def __init__(self, url=None):
10 super(WebView, self).__init__()
11 self._url = url
12
13 self._webview = None
14
15 def _startup(self):
16 self._impl = Gtk.ScrolledWindow()
17 self._impl.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
18
19 self._webview = WebKit.WebView()
20
21 if self._url:
22 self._webview.load_uri(self._url)
23
24 self._impl.add(self._webview)
25 self._impl.set_min_content_width(200)
26 self._impl.set_min_content_height(200)
27
28 @property
29 def url(self):
30 return self._url
31
32 @url.setter
33 def url(self, value):
34 self._url = value
35 if self._impl:
36 self._webview.load_uri(self._url)
37
[end of toga/platform/gtk/widgets/webview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/toga/platform/gtk/widgets/webview.py b/toga/platform/gtk/widgets/webview.py
--- a/toga/platform/gtk/widgets/webview.py
+++ b/toga/platform/gtk/widgets/webview.py
@@ -1,6 +1,13 @@
from __future__ import print_function, absolute_import, division
-from gi.repository import Gtk, WebKit
+from gi.repository import Gtk
+
+# The following import sometimes fails; handle failure gracefully
+# (see https://github.com/pybee/toga/issues/26)
+try:
+ from gi.repository import WebKit
+except ImportError:
+ WebKit = None
from .base import Widget
@@ -13,6 +20,12 @@
self._webview = None
def _startup(self):
+
+ if WebKit is None:
+ raise RuntimeError(
+ "Import 'from gi.repository import WebKit' failed;" +
+ " may need to install gir1.2-webkit-3.0 or similar.")
+
self._impl = Gtk.ScrolledWindow()
self._impl.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
| {"golden_diff": "diff --git a/toga/platform/gtk/widgets/webview.py b/toga/platform/gtk/widgets/webview.py\n--- a/toga/platform/gtk/widgets/webview.py\n+++ b/toga/platform/gtk/widgets/webview.py\n@@ -1,6 +1,13 @@\n from __future__ import print_function, absolute_import, division\n \n-from gi.repository import Gtk, WebKit\n+from gi.repository import Gtk\n+\n+# The following import sometimes fails; handle failure gracefully\n+# (see https://github.com/pybee/toga/issues/26)\n+try:\n+ from gi.repository import WebKit\n+except ImportError:\n+ WebKit = None\n \n from .base import Widget\n \n@@ -13,6 +20,12 @@\n self._webview = None\n \n def _startup(self):\n+\n+ if WebKit is None:\n+ raise RuntimeError(\n+ \"Import 'from gi.repository import WebKit' failed;\" +\n+ \" may need to install gir1.2-webkit-3.0 or similar.\")\n+\n self._impl = Gtk.ScrolledWindow()\n self._impl.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n", "issue": "\"ImportError: cannot import name WebKit\" on Ubuntu 14.04\nInstalled toga via global `sudo pip install toga`. Then, tried to import it:\n\n```\n>>> import toga\nERROR:root:Could not find any typelib for WebKit\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/usr/local/lib/python2.7/dist-packages/toga/__init__.py\", line 86, in <module>\n from .platform.gtk.app import *\n File \"/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/app.py\", line 7, in <module>\n from .window import Window\n File \"/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/window.py\", line 6, in <module>\n from .command import SEPARATOR, SPACER, EXPANDING_SPACER\n File \"/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/command.py\", line 1, in <module>\n from .widgets import Icon\n File \"/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/widgets/__init__.py\", line 17, in <module>\n from .webview import WebView\n File \"/usr/local/lib/python2.7/dist-packages/toga/platform/gtk/widgets/webview.py\", line 3, in <module>\n from gi.repository import Gtk, WebKit\nImportError: cannot import name WebKit\n```\n\nDid a `sudo apt-get install python-webkit`, but still getting the same import error. I'm running Ubuntu under Crouton on a Chromebook, which doesn't always contain the full set of packages.\n\nSince the application I aim to create (a GUI launcher for [KA Lite](https://github.com/learningequality/ka-lite/)) would rely on toga's awesome dedication to being pure Python and not needing any extra packages to be installed to work cross-platform, and since we wouldn't be needing the WebView, would it be possible to have it handle a lack of WebKit more gracefully, only erroring out if a WebView was actually used? 
Thanks!\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nfrom gi.repository import Gtk, WebKit\n\nfrom .base import Widget\n\n\nclass WebView(Widget):\n def __init__(self, url=None):\n super(WebView, self).__init__()\n self._url = url\n\n self._webview = None\n\n def _startup(self):\n self._impl = Gtk.ScrolledWindow()\n self._impl.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n\n self._webview = WebKit.WebView()\n\n if self._url:\n self._webview.load_uri(self._url)\n\n self._impl.add(self._webview)\n self._impl.set_min_content_width(200)\n self._impl.set_min_content_height(200)\n\n @property\n def url(self):\n return self._url\n\n @url.setter\n def url(self, value):\n self._url = value\n if self._impl:\n self._webview.load_uri(self._url)\n", "path": "toga/platform/gtk/widgets/webview.py"}]} | 1,300 | 255 |
gh_patches_debug_6994 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Count of results displayed on the challenge card does not match leaderboard count
**Describe the bug**
The card for the node21 challenge currently notes there are 21 results. Clicking on this brings you to the leaderboard where only 2 results are present. It seems that the count is including submissions which failed and/or submissions where the evaluation failed, which is misleading.
**To Reproduce**
Steps to reproduce the behavior:
- Choose a challenge where the database includes many failed submissions or failed evaluations (e.g. node21 at present)
- View the card for this challenge (currently it is on the GC front page)
- Verify that the number of results shown on the card does not match the number of results on the leaderboard (click the number shown on the card).
**Expected behavior**
The number of reported results should match the number of results on the leaderboard
**Screenshots**

</issue>
<code>
[start of app/grandchallenge/challenges/tasks.py]
1 from celery import shared_task
2 from django.contrib.auth import get_user_model
3 from django.core.mail import mail_managers
4 from django.db.models import Count, Max
5 from requests import exceptions, get
6
7 from grandchallenge.challenges.models import Challenge, ExternalChallenge
8 from grandchallenge.evaluation.models import Evaluation
9 from grandchallenge.subdomains.utils import reverse
10
11
12 @shared_task
13 def update_challenge_results_cache():
14 challenges = Challenge.objects.all()
15 evaluation_info = (
16 Evaluation.objects.filter(published=True)
17 .values("submission__phase__challenge_id")
18 .annotate(
19 cached_num_results=Count("submission__phase__challenge_id"),
20 cached_latest_result=Max("created"),
21 )
22 )
23 evaluation_info_by_challenge = {
24 str(v["submission__phase__challenge_id"]): v for v in evaluation_info
25 }
26 participant_counts = (
27 get_user_model()
28 .objects.values("groups__participants_of_challenge")
29 .annotate(cached_num_participants=Count("pk"))
30 )
31 participant_counts_by_challenge = {
32 str(v["groups__participants_of_challenge"]): v
33 for v in participant_counts
34 }
35
36 for c in challenges:
37 c.cached_num_results = evaluation_info_by_challenge.get(
38 str(c.pk), {}
39 ).get("cached_num_results", 0)
40 c.cached_latest_result = evaluation_info_by_challenge.get(
41 str(c.pk), {}
42 ).get("cached_latest_result", None)
43 c.cached_num_participants = participant_counts_by_challenge.get(
44 str(c.pk), {}
45 ).get("cached_num_participants", 0)
46
47 Challenge.objects.bulk_update(
48 challenges,
49 [
50 "cached_num_results",
51 "cached_num_participants",
52 "cached_latest_result",
53 ],
54 )
55
56
57 @shared_task
58 def check_external_challenge_urls():
59 """
60 Checks that all external challenge urls are reachable.
61
62 Emails the managers if any of the challenges are not.
63 """
64 challenges = ExternalChallenge.objects.filter(hidden=False)
65 errors = []
66
67 for challenge in challenges:
68 try:
69 url = challenge.homepage
70 if not url.startswith("http"):
71 url = "http://" + url
72 r = get(url, timeout=60)
73 # raise an exception when we receive a http error (e.g., 404)
74 r.raise_for_status()
75 except exceptions.RequestException as err:
76 update_url = reverse(
77 "challenges:external-update",
78 kwargs={"short_name": challenge.short_name},
79 )
80 errors.append(
81 f"Error when trying to access '{challenge}': {err}. You can "
82 f"update it here: {update_url}"
83 )
84
85 if errors:
86 mail_managers(
87 subject=f"Unreachable external challenges ({len(errors)})",
88 message="\n\n".join(errors),
89 )
90
[end of app/grandchallenge/challenges/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/challenges/tasks.py b/app/grandchallenge/challenges/tasks.py
--- a/app/grandchallenge/challenges/tasks.py
+++ b/app/grandchallenge/challenges/tasks.py
@@ -13,7 +13,7 @@
def update_challenge_results_cache():
challenges = Challenge.objects.all()
evaluation_info = (
- Evaluation.objects.filter(published=True)
+ Evaluation.objects.filter(published=True, rank__gt=0)
.values("submission__phase__challenge_id")
.annotate(
cached_num_results=Count("submission__phase__challenge_id"),
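The one-line fix above narrows the cached count to ranked evaluations. A minimal sketch of the changed semantics, assuming grand-challenge's convention that failed or unranked evaluations keep `rank == 0` (the import and assertion scaffolding below are illustrative, not taken from the repository):

```
from grandchallenge.evaluation.models import Evaluation

# Only ranked evaluations appear on the leaderboard, so counting these
# keeps the challenge-card number and the leaderboard in sync.
leaderboard_rows = Evaluation.objects.filter(published=True, rank__gt=0)
assert all(e.rank > 0 for e in leaderboard_rows)  # failed runs stay at rank 0
```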
| {"golden_diff": "diff --git a/app/grandchallenge/challenges/tasks.py b/app/grandchallenge/challenges/tasks.py\n--- a/app/grandchallenge/challenges/tasks.py\n+++ b/app/grandchallenge/challenges/tasks.py\n@@ -13,7 +13,7 @@\n def update_challenge_results_cache():\n challenges = Challenge.objects.all()\n evaluation_info = (\n- Evaluation.objects.filter(published=True)\n+ Evaluation.objects.filter(published=True, rank__gt=0)\n .values(\"submission__phase__challenge_id\")\n .annotate(\n cached_num_results=Count(\"submission__phase__challenge_id\"),\n", "issue": "Count of results displayed on the challenge card does not match leaderboard count\n**Describe the bug**\r\nThe card for the node21 challenge currently notes there are 21 results. Clicking on this brings you to the leaderboard where only 2 results are present. It seems that the count is including submissions which failed and/or submissions where the evaluation failed, which is misleading. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n - Choose a challenge where the database includes many failed submissions or failed evaluations (e.g. node21 at present)\r\n- View the card for this challenge (currently it is on the GC front page)\r\n - Verify that the number of results shown on the card does not match the number of results on the leaderboard (click the number shown on the card).\r\n\r\n**Expected behavior**\r\nThe number of reported results should match the number of results on the leaderboard\r\n\r\n**Screenshots**\r\n\r\n\r\n\n", "before_files": [{"content": "from celery import shared_task\nfrom django.contrib.auth import get_user_model\nfrom django.core.mail import mail_managers\nfrom django.db.models import Count, Max\nfrom requests import exceptions, get\n\nfrom grandchallenge.challenges.models import Challenge, ExternalChallenge\nfrom grandchallenge.evaluation.models import Evaluation\nfrom grandchallenge.subdomains.utils import reverse\n\n\n@shared_task\ndef update_challenge_results_cache():\n challenges = Challenge.objects.all()\n evaluation_info = (\n Evaluation.objects.filter(published=True)\n .values(\"submission__phase__challenge_id\")\n .annotate(\n cached_num_results=Count(\"submission__phase__challenge_id\"),\n cached_latest_result=Max(\"created\"),\n )\n )\n evaluation_info_by_challenge = {\n str(v[\"submission__phase__challenge_id\"]): v for v in evaluation_info\n }\n participant_counts = (\n get_user_model()\n .objects.values(\"groups__participants_of_challenge\")\n .annotate(cached_num_participants=Count(\"pk\"))\n )\n participant_counts_by_challenge = {\n str(v[\"groups__participants_of_challenge\"]): v\n for v in participant_counts\n }\n\n for c in challenges:\n c.cached_num_results = evaluation_info_by_challenge.get(\n str(c.pk), {}\n ).get(\"cached_num_results\", 0)\n c.cached_latest_result = evaluation_info_by_challenge.get(\n str(c.pk), {}\n ).get(\"cached_latest_result\", None)\n c.cached_num_participants = participant_counts_by_challenge.get(\n str(c.pk), {}\n ).get(\"cached_num_participants\", 0)\n\n Challenge.objects.bulk_update(\n challenges,\n [\n \"cached_num_results\",\n \"cached_num_participants\",\n \"cached_latest_result\",\n ],\n )\n\n\n@shared_task\ndef check_external_challenge_urls():\n \"\"\"\n Checks that all external challenge urls are reachable.\n\n Emails the managers if any of the challenges are not.\n \"\"\"\n challenges = ExternalChallenge.objects.filter(hidden=False)\n errors = []\n\n for challenge in challenges:\n try:\n url = challenge.homepage\n if not 
url.startswith(\"http\"):\n url = \"http://\" + url\n r = get(url, timeout=60)\n # raise an exception when we receive a http error (e.g., 404)\n r.raise_for_status()\n except exceptions.RequestException as err:\n update_url = reverse(\n \"challenges:external-update\",\n kwargs={\"short_name\": challenge.short_name},\n )\n errors.append(\n f\"Error when trying to access '{challenge}': {err}. You can \"\n f\"update it here: {update_url}\"\n )\n\n if errors:\n mail_managers(\n subject=f\"Unreachable external challenges ({len(errors)})\",\n message=\"\\n\\n\".join(errors),\n )\n", "path": "app/grandchallenge/challenges/tasks.py"}]} | 1,558 | 128 |
gh_patches_debug_1153 | rasdani/github-patches | git_diff | scverse__scanpy-997 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`datasets.pbmc68k_reduced` isn't contained in the pypi package anymore
This still works in `1.4.4.post1`. It's very likely caused by changes to `setup.py`. I experienced similar problems before and fixed them via `package_data`. But this got removed. It's probably only a problem for the source-based installs.
https://github.com/theislab/scanpy/commit/881f0bef31cdfe0df7333641dc847a60894b5c41#diff-2eeaed663bd0d25b7e608891384b7298
```
>>> import scanpy
>>> scanpy.__version__
<Version('1.4.5.post2')>
>>> scanpy.datasets.pbmc68k_reduced()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/datasets/__init__.py", line 239, in pbmc68k_reduced
return read(filename)
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/readwrite.py", line 114, in read
**kwargs,
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/readwrite.py", line 524, in _read
return read_h5ad(filename, backed=backed)
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/readwrite/read.py", line 447, in read_h5ad
constructor_args = _read_args_from_h5ad(filename=filename, chunk_size=chunk_size)
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/readwrite/read.py", line 481, in _read_args_from_h5ad
f = h5py.File(filename, 'r')
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/h5py/h5sparse.py", line 162, in __init__
**kwds,
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/h5py/_hl/files.py", line 312, in __init__
fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)
File "/Users/alexwolf/miniconda3/lib/python3.6/site-packages/h5py/_hl/files.py", line 142, in make_fid
fid = h5f.open(name, flags, fapl=fapl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5f.pyx", line 78, in h5py.h5f.open
```
</issue>
<code>
[start of setup.py]
1 import sys
2
3 if sys.version_info < (3, 6):
4 sys.exit('scanpy requires Python >= 3.6')
5 from pathlib import Path
6
7 from setuptools import setup, find_packages
8
9
10 try:
11 from scanpy import __author__, __email__
12 except ImportError: # Deps not yet installed
13 __author__ = __email__ = ''
14
15 setup(
16 name='scanpy',
17 use_scm_version=True,
18 setup_requires=['setuptools_scm'],
19 description='Single-Cell Analysis in Python.',
20 long_description=Path('README.rst').read_text('utf-8'),
21 url='http://github.com/theislab/scanpy',
22 author=__author__,
23 author_email=__email__,
24 license='BSD',
25 python_requires='>=3.6',
26 install_requires=[
27 l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()
28 ],
29 extras_require=dict(
30 louvain=['python-igraph', 'louvain>=0.6'],
31 leiden=['python-igraph', 'leidenalg'],
32 bbknn=['bbknn'],
33 rapids=['cudf', 'cuml', 'cugraph'],
34 magic=['magic-impute>=2.0'],
35 doc=[
36 'sphinx',
37 'sphinx_rtd_theme',
38 'sphinx_autodoc_typehints',
39 'scanpydoc>=0.4.3',
40 'typing_extensions; python_version < "3.8"', # for `Literal`
41 ],
42 test=[
43 'pytest>=4.4',
44 'dask[array]',
45 'fsspec',
46 'zappy',
47 'zarr',
48 'black',
49 'profimp',
50 ],
51 ),
52 packages=find_packages(),
53 entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),
54 zip_safe=False,
55 classifiers=[
56 'Development Status :: 5 - Production/Stable',
57 'Environment :: Console',
58 'Framework :: Jupyter',
59 'Intended Audience :: Developers',
60 'Intended Audience :: Science/Research',
61 'Natural Language :: English',
62 'Operating System :: MacOS :: MacOS X',
63 'Operating System :: Microsoft :: Windows',
64 'Operating System :: POSIX :: Linux',
65 'Programming Language :: Python :: 3',
66 'Programming Language :: Python :: 3.5',
67 'Programming Language :: Python :: 3.6',
68 'Programming Language :: Python :: 3.7',
69 'Topic :: Scientific/Engineering :: Bio-Informatics',
70 'Topic :: Scientific/Engineering :: Visualization',
71 ],
72 )
73
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,6 +50,7 @@
],
),
packages=find_packages(),
+ include_package_data=True,
entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),
zip_safe=False,
classifiers=[
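For context on why this single flag restores the missing `.h5ad` files: `include_package_data=True` tells setuptools to bundle non-Python package files declared via `MANIFEST.in` (or collected by an SCM plugin such as setuptools_scm, which this project already uses) into sdists and wheels. A hedged sketch of the interaction — the `MANIFEST.in` pattern shown in the comment is illustrative, not taken from the repository:

```
# setup.py (fragment, illustrative)
from setuptools import setup, find_packages

setup(
    name='scanpy',
    packages=find_packages(),
    include_package_data=True,  # ship non-Python files inside the packages,
    # e.g. data files matched by a MANIFEST.in line such as:
    #   recursive-include scanpy/datasets *.h5ad
)
```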
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,6 +50,7 @@\n ],\n ),\n packages=find_packages(),\n+ include_package_data=True,\n entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\n zip_safe=False,\n classifiers=[\n", "issue": "`datasets.pbmc68k_reduced` isn't contained in the pypi package anymore\nThis still works in `1.4.4.post1`. It's very likely caused by changes to `setup.py`. I experienced similar problems before and fixed them via `package_data`. But this got removed. It's probably only a problem for the source-based installs.\r\n\r\nhttps://github.com/theislab/scanpy/commit/881f0bef31cdfe0df7333641dc847a60894b5c41#diff-2eeaed663bd0d25b7e608891384b7298\r\n\r\n```\r\n>>> import scanpy\r\n>>> scanpy.__version__\r\n<Version('1.4.5.post2')>\r\n>>> scanpy.datasets.pbmc68k_reduced()\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/datasets/__init__.py\", line 239, in pbmc68k_reduced\r\n return read(filename)\r\n File \"/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/readwrite.py\", line 114, in read\r\n **kwargs,\r\n File \"/Users/alexwolf/miniconda3/lib/python3.6/site-packages/scanpy/readwrite.py\", line 524, in _read\r\n return read_h5ad(filename, backed=backed)\r\n File \"/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/readwrite/read.py\", line 447, in read_h5ad\r\n constructor_args = _read_args_from_h5ad(filename=filename, chunk_size=chunk_size)\r\n File \"/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/readwrite/read.py\", line 481, in _read_args_from_h5ad\r\n f = h5py.File(filename, 'r')\r\n File \"/Users/alexwolf/miniconda3/lib/python3.6/site-packages/anndata/h5py/h5sparse.py\", line 162, in __init__\r\n **kwds,\r\n File \"/Users/alexwolf/miniconda3/lib/python3.6/site-packages/h5py/_hl/files.py\", line 312, in __init__\r\n fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)\r\n File \"/Users/alexwolf/miniconda3/lib/python3.6/site-packages/h5py/_hl/files.py\", line 142, in make_fid\r\n fid = h5f.open(name, flags, fapl=fapl)\r\n File \"h5py/_objects.pyx\", line 54, in h5py._objects.with_phil.wrapper\r\n File \"h5py/_objects.pyx\", line 55, in h5py._objects.with_phil.wrapper\r\n File \"h5py/h5f.pyx\", line 78, in h5py.h5f.open\r\n```\n", "before_files": [{"content": "import sys\n\nif sys.version_info < (3, 6):\n sys.exit('scanpy requires Python >= 3.6')\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\n\n\ntry:\n from scanpy import __author__, __email__\nexcept ImportError: # Deps not yet installed\n __author__ = __email__ = ''\n\nsetup(\n name='scanpy',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description='Single-Cell Analysis in Python.',\n long_description=Path('README.rst').read_text('utf-8'),\n url='http://github.com/theislab/scanpy',\n author=__author__,\n author_email=__email__,\n license='BSD',\n python_requires='>=3.6',\n install_requires=[\n l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()\n ],\n extras_require=dict(\n louvain=['python-igraph', 'louvain>=0.6'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n rapids=['cudf', 'cuml', 'cugraph'],\n magic=['magic-impute>=2.0'],\n doc=[\n 'sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints',\n 'scanpydoc>=0.4.3',\n 'typing_extensions; python_version < \"3.8\"', # for `Literal`\n ],\n test=[\n 'pytest>=4.4',\n 
'dask[array]',\n 'fsspec',\n 'zappy',\n 'zarr',\n 'black',\n 'profimp',\n ],\n ),\n packages=find_packages(),\n entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Jupyter',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],\n)\n", "path": "setup.py"}]} | 1,913 | 74 |
gh_patches_debug_30970 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check if ThreadLocalRuntimeContext can be removed since python3.4 support is dropped
https://github.com/open-telemetry/opentelemetry-python/blob/master/opentelemetry-api/src/opentelemetry/context/threadlocal_context.py#L21
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/context/threadlocal_context.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import threading
16
17 from opentelemetry.context.context import Context, RuntimeContext
18
19
20 class ThreadLocalRuntimeContext(RuntimeContext):
21 """An implementation of the RuntimeContext interface
22 which uses thread-local storage under the hood. This
23 implementation is available for usage with Python 3.4.
24 """
25
26 class Token:
27 def __init__(self, context: Context) -> None:
28 self._context = context
29
30 _CONTEXT_KEY = "current_context"
31
32 def __init__(self) -> None:
33 self._current_context = threading.local()
34
35 def attach(self, context: Context) -> object:
36 """See `opentelemetry.context.RuntimeContext.attach`."""
37 current = self.get_current()
38 setattr(self._current_context, self._CONTEXT_KEY, context)
39 return self.Token(current)
40
41 def get_current(self) -> Context:
42 """See `opentelemetry.context.RuntimeContext.get_current`."""
43 if not hasattr(self._current_context, self._CONTEXT_KEY):
44 setattr(
45 self._current_context, self._CONTEXT_KEY, Context(),
46 )
47 context = getattr(
48 self._current_context, self._CONTEXT_KEY
49 ) # type: Context
50 return context
51
52 def detach(self, token: object) -> None:
53 """See `opentelemetry.context.RuntimeContext.detach`."""
54 if not isinstance(token, self.Token):
55 raise ValueError("invalid token")
56 # pylint: disable=protected-access
57 setattr(self._current_context, self._CONTEXT_KEY, token._context)
58
59
60 __all__ = ["ThreadLocalRuntimeContext"]
61
[end of opentelemetry-api/src/opentelemetry/context/threadlocal_context.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/src/opentelemetry/context/threadlocal_context.py b/opentelemetry-api/src/opentelemetry/context/threadlocal_context.py
deleted file mode 100644
--- a/opentelemetry-api/src/opentelemetry/context/threadlocal_context.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import threading
-
-from opentelemetry.context.context import Context, RuntimeContext
-
-
-class ThreadLocalRuntimeContext(RuntimeContext):
- """An implementation of the RuntimeContext interface
- which uses thread-local storage under the hood. This
- implementation is available for usage with Python 3.4.
- """
-
- class Token:
- def __init__(self, context: Context) -> None:
- self._context = context
-
- _CONTEXT_KEY = "current_context"
-
- def __init__(self) -> None:
- self._current_context = threading.local()
-
- def attach(self, context: Context) -> object:
- """See `opentelemetry.context.RuntimeContext.attach`."""
- current = self.get_current()
- setattr(self._current_context, self._CONTEXT_KEY, context)
- return self.Token(current)
-
- def get_current(self) -> Context:
- """See `opentelemetry.context.RuntimeContext.get_current`."""
- if not hasattr(self._current_context, self._CONTEXT_KEY):
- setattr(
- self._current_context, self._CONTEXT_KEY, Context(),
- )
- context = getattr(
- self._current_context, self._CONTEXT_KEY
- ) # type: Context
- return context
-
- def detach(self, token: object) -> None:
- """See `opentelemetry.context.RuntimeContext.detach`."""
- if not isinstance(token, self.Token):
- raise ValueError("invalid token")
- # pylint: disable=protected-access
- setattr(self._current_context, self._CONTEXT_KEY, token._context)
-
-
-__all__ = ["ThreadLocalRuntimeContext"]
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/context/threadlocal_context.py b/opentelemetry-api/src/opentelemetry/context/threadlocal_context.py\ndeleted file mode 100644\n--- a/opentelemetry-api/src/opentelemetry/context/threadlocal_context.py\n+++ /dev/null\n@@ -1,60 +0,0 @@\n-# Copyright The OpenTelemetry Authors\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-import threading\n-\n-from opentelemetry.context.context import Context, RuntimeContext\n-\n-\n-class ThreadLocalRuntimeContext(RuntimeContext):\n- \"\"\"An implementation of the RuntimeContext interface\n- which uses thread-local storage under the hood. This\n- implementation is available for usage with Python 3.4.\n- \"\"\"\n-\n- class Token:\n- def __init__(self, context: Context) -> None:\n- self._context = context\n-\n- _CONTEXT_KEY = \"current_context\"\n-\n- def __init__(self) -> None:\n- self._current_context = threading.local()\n-\n- def attach(self, context: Context) -> object:\n- \"\"\"See `opentelemetry.context.RuntimeContext.attach`.\"\"\"\n- current = self.get_current()\n- setattr(self._current_context, self._CONTEXT_KEY, context)\n- return self.Token(current)\n-\n- def get_current(self) -> Context:\n- \"\"\"See `opentelemetry.context.RuntimeContext.get_current`.\"\"\"\n- if not hasattr(self._current_context, self._CONTEXT_KEY):\n- setattr(\n- self._current_context, self._CONTEXT_KEY, Context(),\n- )\n- context = getattr(\n- self._current_context, self._CONTEXT_KEY\n- ) # type: Context\n- return context\n-\n- def detach(self, token: object) -> None:\n- \"\"\"See `opentelemetry.context.RuntimeContext.detach`.\"\"\"\n- if not isinstance(token, self.Token):\n- raise ValueError(\"invalid token\")\n- # pylint: disable=protected-access\n- setattr(self._current_context, self._CONTEXT_KEY, token._context)\n-\n-\n-__all__ = [\"ThreadLocalRuntimeContext\"]\n", "issue": "Check if ThreadLocalRuntimeContext can be removed since python3.4 support is dropped\nhttps://github.com/open-telemetry/opentelemetry-python/blob/master/opentelemetry-api/src/opentelemetry/context/threadlocal_context.py#L21\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\n\nfrom opentelemetry.context.context import Context, RuntimeContext\n\n\nclass ThreadLocalRuntimeContext(RuntimeContext):\n \"\"\"An implementation of the RuntimeContext interface\n which uses thread-local storage under the hood. 
This\n implementation is available for usage with Python 3.4.\n \"\"\"\n\n class Token:\n def __init__(self, context: Context) -> None:\n self._context = context\n\n _CONTEXT_KEY = \"current_context\"\n\n def __init__(self) -> None:\n self._current_context = threading.local()\n\n def attach(self, context: Context) -> object:\n \"\"\"See `opentelemetry.context.RuntimeContext.attach`.\"\"\"\n current = self.get_current()\n setattr(self._current_context, self._CONTEXT_KEY, context)\n return self.Token(current)\n\n def get_current(self) -> Context:\n \"\"\"See `opentelemetry.context.RuntimeContext.get_current`.\"\"\"\n if not hasattr(self._current_context, self._CONTEXT_KEY):\n setattr(\n self._current_context, self._CONTEXT_KEY, Context(),\n )\n context = getattr(\n self._current_context, self._CONTEXT_KEY\n ) # type: Context\n return context\n\n def detach(self, token: object) -> None:\n \"\"\"See `opentelemetry.context.RuntimeContext.detach`.\"\"\"\n if not isinstance(token, self.Token):\n raise ValueError(\"invalid token\")\n # pylint: disable=protected-access\n setattr(self._current_context, self._CONTEXT_KEY, token._context)\n\n\n__all__ = [\"ThreadLocalRuntimeContext\"]\n", "path": "opentelemetry-api/src/opentelemetry/context/threadlocal_context.py"}]} | 1,178 | 585 |
gh_patches_debug_14512 | rasdani/github-patches | git_diff | safe-global__safe-config-service-698 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
500 Error on unsanitized URL query params
**Describe the bug**
An error response with 500 Internal Server Error is returned to clients when an unsanitized URL query param is sent to the service.
**To Reproduce**
Steps to reproduce the behavior:
- Check: https://safe-config.safe.global/api/v1/safe-apps/?url=%00
**Expected behavior**
URL input is sanitized beforehand.
**Environment**
- Staging & production
- All chains
</issue>
<code>
[start of src/safe_apps/views.py]
1 from typing import Any
2
3 from django.db.models import Q, QuerySet
4 from django.utils.decorators import method_decorator
5 from django.views.decorators.cache import cache_page
6 from drf_yasg import openapi
7 from drf_yasg.utils import swagger_auto_schema
8 from rest_framework.generics import ListAPIView
9 from rest_framework.request import Request
10 from rest_framework.response import Response
11
12 from .models import SafeApp
13 from .serializers import SafeAppsResponseSerializer
14
15
16 class SafeAppsListView(ListAPIView):
17 serializer_class = SafeAppsResponseSerializer
18 pagination_class = None
19
20 _swagger_chain_id_param = openapi.Parameter(
21 "chainId",
22 openapi.IN_QUERY,
23 description="Used to filter Safe Apps that are available on `chainId`",
24 type=openapi.TYPE_INTEGER,
25 )
26 _swagger_client_url_param = openapi.Parameter(
27 "clientUrl",
28 openapi.IN_QUERY,
29 description="Used to filter Safe Apps that are available on `clientUrl`",
30 type=openapi.TYPE_STRING,
31 )
32 _swagger_url_param = openapi.Parameter(
33 "url",
34 openapi.IN_QUERY,
35 description="Filter Safe Apps available from `url`. `url` needs to be an exact match",
36 type=openapi.TYPE_STRING,
37 )
38
39 @method_decorator(cache_page(60 * 10, cache="safe-apps")) # Cache 10 minutes
40 @swagger_auto_schema(
41 manual_parameters=[
42 _swagger_chain_id_param,
43 _swagger_client_url_param,
44 _swagger_url_param,
45 ]
46 ) # type: ignore[misc]
47 def get(self, request: Request, *args: Any, **kwargs: Any) -> Response:
48 """
49 Returns a collection of Safe Apps (across different chains).
50 Each Safe App can optionally include the information about the `Provider`
51 """
52 return super().get(request, *args, **kwargs)
53
54 def get_queryset(self) -> QuerySet[SafeApp]:
55 queryset = SafeApp.objects.filter(visible=True)
56
57 chain_id = self.request.query_params.get("chainId")
58 if chain_id is not None and chain_id.isdigit():
59 queryset = queryset.filter(chain_ids__contains=[chain_id])
60
61 client_url = self.request.query_params.get("clientUrl")
62 if client_url:
63 queryset = queryset.filter(
64 Q(exclusive_clients__url=client_url) | Q(exclusive_clients__isnull=True)
65 )
66
67 url = self.request.query_params.get("url")
68 if url:
69 queryset = queryset.filter(url=url)
70
71 return queryset
72
[end of src/safe_apps/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/safe_apps/views.py b/src/safe_apps/views.py
--- a/src/safe_apps/views.py
+++ b/src/safe_apps/views.py
@@ -59,13 +59,13 @@
queryset = queryset.filter(chain_ids__contains=[chain_id])
client_url = self.request.query_params.get("clientUrl")
- if client_url:
+ if client_url and "\0" not in client_url:
queryset = queryset.filter(
Q(exclusive_clients__url=client_url) | Q(exclusive_clients__isnull=True)
)
url = self.request.query_params.get("url")
- if url:
+ if url and "\0" not in url:
queryset = queryset.filter(url=url)
return queryset
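The root cause is that PostgreSQL text columns reject NUL bytes, so an unfiltered `%00` in a query param surfaces as a database error (hence the 500) instead of an empty result. The patch short-circuits such values; a hypothetical helper generalising the same guard (not part of the repository) could look like:

```
def clean_str_param(request, name):
    """Return the query param only if it is non-empty and NUL-free."""
    value = request.query_params.get(name)
    if value and "\0" not in value:
        return value
    return None
```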
| {"golden_diff": "diff --git a/src/safe_apps/views.py b/src/safe_apps/views.py\n--- a/src/safe_apps/views.py\n+++ b/src/safe_apps/views.py\n@@ -59,13 +59,13 @@\n queryset = queryset.filter(chain_ids__contains=[chain_id])\n \n client_url = self.request.query_params.get(\"clientUrl\")\n- if client_url:\n+ if client_url and \"\\0\" not in client_url:\n queryset = queryset.filter(\n Q(exclusive_clients__url=client_url) | Q(exclusive_clients__isnull=True)\n )\n \n url = self.request.query_params.get(\"url\")\n- if url:\n+ if url and \"\\0\" not in url:\n queryset = queryset.filter(url=url)\n \n return queryset\n", "issue": "500 Error on unsanitized URL query params \n**Describe the bug**\r\nError response with 500 Internal server Error is returned to the clients when a unsanitized URL query param is sent to the service.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n- Check: https://safe-config.safe.global/api/v1/safe-apps/?url=%00\r\n\r\n**Expected behavior**\r\nURL input is sanitized beforehand.\r\n\r\n**Environment**\r\n - Staging & production\r\n - All chains\r\n\n", "before_files": [{"content": "from typing import Any\n\nfrom django.db.models import Q, QuerySet\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import cache_page\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom .models import SafeApp\nfrom .serializers import SafeAppsResponseSerializer\n\n\nclass SafeAppsListView(ListAPIView):\n serializer_class = SafeAppsResponseSerializer\n pagination_class = None\n\n _swagger_chain_id_param = openapi.Parameter(\n \"chainId\",\n openapi.IN_QUERY,\n description=\"Used to filter Safe Apps that are available on `chainId`\",\n type=openapi.TYPE_INTEGER,\n )\n _swagger_client_url_param = openapi.Parameter(\n \"clientUrl\",\n openapi.IN_QUERY,\n description=\"Used to filter Safe Apps that are available on `clientUrl`\",\n type=openapi.TYPE_STRING,\n )\n _swagger_url_param = openapi.Parameter(\n \"url\",\n openapi.IN_QUERY,\n description=\"Filter Safe Apps available from `url`. `url` needs to be an exact match\",\n type=openapi.TYPE_STRING,\n )\n\n @method_decorator(cache_page(60 * 10, cache=\"safe-apps\")) # Cache 10 minutes\n @swagger_auto_schema(\n manual_parameters=[\n _swagger_chain_id_param,\n _swagger_client_url_param,\n _swagger_url_param,\n ]\n ) # type: ignore[misc]\n def get(self, request: Request, *args: Any, **kwargs: Any) -> Response:\n \"\"\"\n Returns a collection of Safe Apps (across different chains).\n Each Safe App can optionally include the information about the `Provider`\n \"\"\"\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self) -> QuerySet[SafeApp]:\n queryset = SafeApp.objects.filter(visible=True)\n\n chain_id = self.request.query_params.get(\"chainId\")\n if chain_id is not None and chain_id.isdigit():\n queryset = queryset.filter(chain_ids__contains=[chain_id])\n\n client_url = self.request.query_params.get(\"clientUrl\")\n if client_url:\n queryset = queryset.filter(\n Q(exclusive_clients__url=client_url) | Q(exclusive_clients__isnull=True)\n )\n\n url = self.request.query_params.get(\"url\")\n if url:\n queryset = queryset.filter(url=url)\n\n return queryset\n", "path": "src/safe_apps/views.py"}]} | 1,320 | 169 |
gh_patches_debug_25253 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2368 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
take out secret dev notes visible in frontend :-)

</issue>
<code>
[start of meinberlin/apps/projects/templatetags/meinberlin_project_tags.py]
1 from django import template
2
3 from adhocracy4.comments.models import Comment
4 from meinberlin.apps.budgeting.models import Proposal as budget_proposal
5 from meinberlin.apps.ideas.models import Idea
6 from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
7 from meinberlin.apps.mapideas.models import MapIdea
8 from meinberlin.apps.projects import get_project_type
9
10 register = template.Library()
11
12
13 @register.filter
14 def project_url(project):
15 if get_project_type(project) in ('external', 'bplan'):
16 return project.externalproject.url
17 return project.get_absolute_url()
18
19
20 @register.filter
21 def project_type(project):
22 return get_project_type(project)
23
24
25 @register.filter
26 def is_external(project):
27 return get_project_type(project) in ('external', 'bplan')
28
29
30 @register.filter
31 def is_container(project):
32 return get_project_type(project) == 'container'
33
34
35 @register.simple_tag
36 def to_class_name(value):
37 return value.__class__.__name__
38
39
40 @register.simple_tag
41 def get_num_entries(module):
42 """Count all user-generated items."""
43 item_count = Idea.objects.filter(module=module).count() \
44 + MapIdea.objects.filter(module=module).count() \
45 + budget_proposal.objects.filter(module=module).count() \
46 + kiezkasse_proposal.objects.filter(module=module).count() \
47 + Comment.objects.filter(idea__module=module).count() \
48 + Comment.objects.filter(mapidea__module=module).count() \
49 + Comment.objects.filter(budget_proposal__module=module).count() \
50 + Comment.objects.filter(kiezkasse_proposal__module=module).count()
51 return item_count
52
[end of meinberlin/apps/projects/templatetags/meinberlin_project_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
@@ -40,12 +40,18 @@
@register.simple_tag
def get_num_entries(module):
"""Count all user-generated items."""
- item_count = Idea.objects.filter(module=module).count() \
+ item_count = \
+ Idea.objects.filter(module=module).count() \
+ MapIdea.objects.filter(module=module).count() \
+ budget_proposal.objects.filter(module=module).count() \
+ kiezkasse_proposal.objects.filter(module=module).count() \
+ Comment.objects.filter(idea__module=module).count() \
+ Comment.objects.filter(mapidea__module=module).count() \
+ Comment.objects.filter(budget_proposal__module=module).count() \
- + Comment.objects.filter(kiezkasse_proposal__module=module).count()
+ + Comment.objects.filter(kiezkasse_proposal__module=module).count() \
+ + Comment.objects.filter(topic__module=module).count() \
+ + Comment.objects.filter(maptopic__module=module).count() \
+ + Comment.objects.filter(paragraph__chapter__module=module).count() \
+ + Comment.objects.filter(chapter__module=module).count() \
+ + Comment.objects.filter(poll__module=module).count()
return item_count
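The fix simply extends the chained sum with the comment relations that were previously missed (topics, map topics, paragraphs, chapters, polls). As a maintenance note, the same count could be derived from a list of lookups so that a new commentable model only needs one extra entry — a hypothetical refactor idea, not part of the patch:

```
COMMENT_LOOKUPS = [
    "idea__module", "mapidea__module", "budget_proposal__module",
    "kiezkasse_proposal__module", "topic__module", "maptopic__module",
    "paragraph__chapter__module", "chapter__module", "poll__module",
]

def count_comments(module):
    return sum(
        Comment.objects.filter(**{lookup: module}).count()
        for lookup in COMMENT_LOOKUPS
    )
```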
| {"golden_diff": "diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n@@ -40,12 +40,18 @@\n @register.simple_tag\n def get_num_entries(module):\n \"\"\"Count all user-generated items.\"\"\"\n- item_count = Idea.objects.filter(module=module).count() \\\n+ item_count = \\\n+ Idea.objects.filter(module=module).count() \\\n + MapIdea.objects.filter(module=module).count() \\\n + budget_proposal.objects.filter(module=module).count() \\\n + kiezkasse_proposal.objects.filter(module=module).count() \\\n + Comment.objects.filter(idea__module=module).count() \\\n + Comment.objects.filter(mapidea__module=module).count() \\\n + Comment.objects.filter(budget_proposal__module=module).count() \\\n- + Comment.objects.filter(kiezkasse_proposal__module=module).count()\n+ + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n+ + Comment.objects.filter(topic__module=module).count() \\\n+ + Comment.objects.filter(maptopic__module=module).count() \\\n+ + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n+ + Comment.objects.filter(chapter__module=module).count() \\\n+ + Comment.objects.filter(poll__module=module).count()\n return item_count\n", "issue": "take out secret dev notes visible in frontend :-)\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n if get_project_type(project) in ('external', 'bplan'):\n return project.externalproject.url\n return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n return get_project_type(project) in ('external', 'bplan')\n\n\[email protected]\ndef is_container(project):\n return get_project_type(project) == 'container'\n\n\[email protected]_tag\ndef to_class_name(value):\n return value.__class__.__name__\n\n\[email protected]_tag\ndef get_num_entries(module):\n \"\"\"Count all user-generated items.\"\"\"\n item_count = Idea.objects.filter(module=module).count() \\\n + MapIdea.objects.filter(module=module).count() \\\n + budget_proposal.objects.filter(module=module).count() \\\n + kiezkasse_proposal.objects.filter(module=module).count() \\\n + Comment.objects.filter(idea__module=module).count() \\\n + Comment.objects.filter(mapidea__module=module).count() \\\n + Comment.objects.filter(budget_proposal__module=module).count() \\\n + Comment.objects.filter(kiezkasse_proposal__module=module).count()\n return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}]} | 1,124 | 368 |
gh_patches_debug_6788 | rasdani/github-patches | git_diff | learningequality__kolibri-1733 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login ID and Password fields for a learner/user should not be case sensitive.
## Summary
Login ID and password fields for a learner/user should not be case sensitive; this matters especially for young learners, who often struggle just to log in.

Please consider this change for the Nalanda branch.
## System information
- Version: Kolibri 0.4.0beta9
- Operating system: Ubuntu 14.04 LTS
- Browser: Chrome
</issue>
<code>
[start of kolibri/auth/backends.py]
1 """
2 Implements custom auth backends as described in the Django docs, for our custom user classes -- FacilityUser and
3 DeviceOwner. The appropriate classes should be listed in the AUTHENTICATION_BACKENDS. Note that authentication
4 backends are checked in the order they're listed.
5 """
6
7 from kolibri.auth.models import DeviceOwner, FacilityUser
8
9
10 class FacilityUserBackend(object):
11 """
12 A class that implements authentication for FacilityUsers.
13 """
14
15 def authenticate(self, username=None, password=None, facility=None):
16 """
17 Authenticates the user if the credentials correspond to a FacilityUser for the specified Facility.
18
19 :param username: a string
20 :param password: a string
21 :param facility: a Facility
22 :return: A FacilityUser instance if successful, or None if authentication failed.
23 """
24 users = FacilityUser.objects.filter(username=username)
25 if facility:
26 users = users.filter(facility=facility)
27 for user in users:
28 if user.check_password(password):
29 return user
30 # Allow login without password for learners for facilities that allow this.
31 # Must specify the facility, to prevent accidental logins
32 elif facility and user.dataset.learner_can_login_with_no_password and not user.roles.count():
33 return user
34 return None
35
36 def get_user(self, user_id):
37 """
38 Gets a user. Auth backends are required to implement this.
39
40 :param user_id: A FacilityUser pk
41 :return: A FacilityUser instance if a BaseUser with that pk is found, else None.
42 """
43 try:
44 return FacilityUser.objects.get(pk=user_id)
45 except FacilityUser.DoesNotExist:
46 return None
47
48
49 class DeviceOwnerBackend(object):
50 """
51 A class that implements authentication for DeviceOwners.
52 """
53
54 def authenticate(self, username=None, password=None, **kwargs):
55 """
56 Authenticates the user if the credentials correspond to a DeviceOwner.
57
58 :param username: a string
59 :param password: a string
60 :return: A DeviceOwner instance if successful, or None if authentication failed.
61 """
62 try:
63 user = DeviceOwner.objects.get(username=username)
64 if user.check_password(password):
65 return user
66 else:
67 return None
68 except DeviceOwner.DoesNotExist:
69 return None
70
71 def get_user(self, user_id):
72 """
73 Gets a user. Auth backends are required to implement this.
74
75 :param user_id: A BaseUser pk
76 :return: A DeviceOwner instance if a BaseUser with that pk is found, else None.
77 """
78 try:
79 return DeviceOwner.objects.get(pk=user_id)
80 except DeviceOwner.DoesNotExist:
81 return None
82
[end of kolibri/auth/backends.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/auth/backends.py b/kolibri/auth/backends.py
--- a/kolibri/auth/backends.py
+++ b/kolibri/auth/backends.py
@@ -21,7 +21,7 @@
:param facility: a Facility
:return: A FacilityUser instance if successful, or None if authentication failed.
"""
- users = FacilityUser.objects.filter(username=username)
+ users = FacilityUser.objects.filter(username__iexact=username)
if facility:
users = users.filter(facility=facility)
for user in users:
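Note that the patch addresses only the username half of the request: Django's `__iexact` lookup makes the account lookup case-insensitive, while passwords remain hashed and therefore case-sensitive. A small illustration of the lookup semantics (the username value is made up):

```
# "Student1", "student1" and "STUDENT1" now all resolve to the same account:
users = FacilityUser.objects.filter(username__iexact="Student1")
```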
| {"golden_diff": "diff --git a/kolibri/auth/backends.py b/kolibri/auth/backends.py\n--- a/kolibri/auth/backends.py\n+++ b/kolibri/auth/backends.py\n@@ -21,7 +21,7 @@\n :param facility: a Facility\n :return: A FacilityUser instance if successful, or None if authentication failed.\n \"\"\"\n- users = FacilityUser.objects.filter(username=username)\n+ users = FacilityUser.objects.filter(username__iexact=username)\n if facility:\n users = users.filter(facility=facility)\n for user in users:\n", "issue": "Login ID and Password fields for a learner/user should not be case sensitive.\n## Summary\r\n\r\nLogin ID and Password fields for a learner/user should not be case sensitive, this is especially for young learners and they struggle a lot to login itself.\r\n\r\nPlease consider this change for Nalanda branch.\r\n\r\n## System information\r\n - Version: Kolibri 0.4.0beta9\r\n - Operating system: Ubuntu 14.04 LTS\r\n - Browser: Chrome\r\n\n", "before_files": [{"content": "\"\"\"\nImplements custom auth backends as described in the Django docs, for our custom user classes -- FacilityUser and\nDeviceOwner. The appropriate classes should be listed in the AUTHENTICATION_BACKENDS. Note that authentication\nbackends are checked in the order they're listed.\n\"\"\"\n\nfrom kolibri.auth.models import DeviceOwner, FacilityUser\n\n\nclass FacilityUserBackend(object):\n \"\"\"\n A class that implements authentication for FacilityUsers.\n \"\"\"\n\n def authenticate(self, username=None, password=None, facility=None):\n \"\"\"\n Authenticates the user if the credentials correspond to a FacilityUser for the specified Facility.\n\n :param username: a string\n :param password: a string\n :param facility: a Facility\n :return: A FacilityUser instance if successful, or None if authentication failed.\n \"\"\"\n users = FacilityUser.objects.filter(username=username)\n if facility:\n users = users.filter(facility=facility)\n for user in users:\n if user.check_password(password):\n return user\n # Allow login without password for learners for facilities that allow this.\n # Must specify the facility, to prevent accidental logins\n elif facility and user.dataset.learner_can_login_with_no_password and not user.roles.count():\n return user\n return None\n\n def get_user(self, user_id):\n \"\"\"\n Gets a user. Auth backends are required to implement this.\n\n :param user_id: A FacilityUser pk\n :return: A FacilityUser instance if a BaseUser with that pk is found, else None.\n \"\"\"\n try:\n return FacilityUser.objects.get(pk=user_id)\n except FacilityUser.DoesNotExist:\n return None\n\n\nclass DeviceOwnerBackend(object):\n \"\"\"\n A class that implements authentication for DeviceOwners.\n \"\"\"\n\n def authenticate(self, username=None, password=None, **kwargs):\n \"\"\"\n Authenticates the user if the credentials correspond to a DeviceOwner.\n\n :param username: a string\n :param password: a string\n :return: A DeviceOwner instance if successful, or None if authentication failed.\n \"\"\"\n try:\n user = DeviceOwner.objects.get(username=username)\n if user.check_password(password):\n return user\n else:\n return None\n except DeviceOwner.DoesNotExist:\n return None\n\n def get_user(self, user_id):\n \"\"\"\n Gets a user. 
Auth backends are required to implement this.\n\n :param user_id: A BaseUser pk\n :return: A DeviceOwner instance if a BaseUser with that pk is found, else None.\n \"\"\"\n try:\n return DeviceOwner.objects.get(pk=user_id)\n except DeviceOwner.DoesNotExist:\n return None\n", "path": "kolibri/auth/backends.py"}]} | 1,352 | 127 |
gh_patches_debug_39006 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-2538 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Skills and Enclosure background services fail to stop and are killed...
## Be clear about the software, hardware and version you are running
For example:
in CLI
>> what version are you running
>> I am running mycroft-core version 20 oh 2, release 0
>> You are on the latest version.
Opensuse Leap 15.1
## Try to provide steps that we can use to replicate the Issue
For example:
1. CTRL+C in CLI
2. Enter ./stop_mycroft.sh
3. Skills and Enclosure services are eventually killed.
4. Takes about 30 seconds total
## Be as specific as possible about the expected condition, and the deviation from expected condition.
user@LinuxOS:~/mycroft-core> ./stop-mycroft.sh skills
Stopping skills (5579)...stopped.
user@LinuxOS:~/mycroft-core> ./stop-mycroft.sh enclosure
Stopping enclosure (5588)...failed to stop.
Killing enclosure (5588)...killed.
user@LinuxOS:~/mycroft-core> ./stop-mycroft.sh
Stopping all mycroft-core services
Stopping messagebus.service (5576)...stopped.
Stopping audio (5582)...stopped.
Stopping speech (5585)...stopped.
...
user@LinuxOS:~/mycroft-core> ./stop-mycroft.sh
Stopping all mycroft-core services
Stopping messagebus.service (18995)...stopped.
Stopping skills (18998)...failed to stop.
Killing skills (18998)...killed.
Stopping audio (19001)...stopped.
Stopping speech (19004)...stopped.
Stopping enclosure (19007)...failed to stop.
Killing enclosure (19007)...killed.
user@LinuxOS:~/mycroft-core>
</issue>
<code>
[start of mycroft/client/enclosure/__main__.py]
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import sys
16
17 from mycroft.util.log import LOG
18 from mycroft.messagebus.client import MessageBusClient
19 from mycroft.configuration import Configuration, LocalConf, SYSTEM_CONFIG
20
21
22 def main():
23 # Read the system configuration
24 system_config = LocalConf(SYSTEM_CONFIG)
25 platform = system_config.get("enclosure", {}).get("platform")
26
27 if platform == "mycroft_mark_1":
28 LOG.debug("Creating Mark I Enclosure")
29 from mycroft.client.enclosure.mark1 import EnclosureMark1
30 enclosure = EnclosureMark1()
31 elif platform == "mycroft_mark_2":
32 LOG.debug("Creating Mark II Enclosure")
33 from mycroft.client.enclosure.mark2 import EnclosureMark2
34 enclosure = EnclosureMark2()
35 else:
36 LOG.debug("Creating generic enclosure, platform='{}'".format(platform))
37
38 # TODO: Mechanism to load from elsewhere. E.g. read a script path from
39 # the mycroft.conf, then load/launch that script.
40 from mycroft.client.enclosure.generic import EnclosureGeneric
41 enclosure = EnclosureGeneric()
42
43 if enclosure:
44 try:
45 LOG.debug("Enclosure started!")
46 enclosure.run()
47 except Exception as e:
48 print(e)
49 finally:
50 sys.exit()
51 else:
52 LOG.debug("No enclosure available for this hardware, running headless")
53
54
55 if __name__ == "__main__":
56 main()
57
[end of mycroft/client/enclosure/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/client/enclosure/__main__.py b/mycroft/client/enclosure/__main__.py
--- a/mycroft/client/enclosure/__main__.py
+++ b/mycroft/client/enclosure/__main__.py
@@ -12,44 +12,67 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-import sys
+"""Entrypoint for enclosure service.
+This provides any "enclosure" specific functionality, for example GUI or
+control over the Mark-1 Faceplate.
+"""
+from mycroft.configuration import LocalConf, SYSTEM_CONFIG
from mycroft.util.log import LOG
-from mycroft.messagebus.client import MessageBusClient
-from mycroft.configuration import Configuration, LocalConf, SYSTEM_CONFIG
+from mycroft.util import (create_daemon, wait_for_exit_signal,
+ reset_sigint_handler)
-def main():
- # Read the system configuration
- system_config = LocalConf(SYSTEM_CONFIG)
- platform = system_config.get("enclosure", {}).get("platform")
+def create_enclosure(platform):
+ """Create an enclosure based on the provided platform string.
+ Arguments:
+ platform (str): platform name string
+
+ Returns:
+ Enclosure object
+ """
if platform == "mycroft_mark_1":
- LOG.debug("Creating Mark I Enclosure")
+ LOG.info("Creating Mark I Enclosure")
from mycroft.client.enclosure.mark1 import EnclosureMark1
enclosure = EnclosureMark1()
elif platform == "mycroft_mark_2":
- LOG.debug("Creating Mark II Enclosure")
+ LOG.info("Creating Mark II Enclosure")
from mycroft.client.enclosure.mark2 import EnclosureMark2
enclosure = EnclosureMark2()
else:
- LOG.debug("Creating generic enclosure, platform='{}'".format(platform))
+ LOG.info("Creating generic enclosure, platform='{}'".format(platform))
# TODO: Mechanism to load from elsewhere. E.g. read a script path from
# the mycroft.conf, then load/launch that script.
from mycroft.client.enclosure.generic import EnclosureGeneric
enclosure = EnclosureGeneric()
+ return enclosure
+
+
+def main():
+ """Launch one of the available enclosure implementations.
+
+ This depends on the configured platform and can currently either be
+ mycroft_mark_1 or mycroft_mark_2, if unconfigured a generic enclosure with
+ only the GUI bus will be started.
+ """
+ # Read the system configuration
+ system_config = LocalConf(SYSTEM_CONFIG)
+ platform = system_config.get("enclosure", {}).get("platform")
+
+ enclosure = create_enclosure(platform)
if enclosure:
try:
LOG.debug("Enclosure started!")
- enclosure.run()
+ reset_sigint_handler()
+ create_daemon(enclosure.run)
+ wait_for_exit_signal()
except Exception as e:
print(e)
- finally:
- sys.exit()
else:
- LOG.debug("No enclosure available for this hardware, running headless")
+ LOG.info("No enclosure available for this hardware, running headless")
if __name__ == "__main__":
| {"golden_diff": "diff --git a/mycroft/client/enclosure/__main__.py b/mycroft/client/enclosure/__main__.py\n--- a/mycroft/client/enclosure/__main__.py\n+++ b/mycroft/client/enclosure/__main__.py\n@@ -12,44 +12,67 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n-import sys\n+\"\"\"Entrypoint for enclosure service.\n \n+This provides any \"enclosure\" specific functionality, for example GUI or\n+control over the Mark-1 Faceplate.\n+\"\"\"\n+from mycroft.configuration import LocalConf, SYSTEM_CONFIG\n from mycroft.util.log import LOG\n-from mycroft.messagebus.client import MessageBusClient\n-from mycroft.configuration import Configuration, LocalConf, SYSTEM_CONFIG\n+from mycroft.util import (create_daemon, wait_for_exit_signal,\n+ reset_sigint_handler)\n \n \n-def main():\n- # Read the system configuration\n- system_config = LocalConf(SYSTEM_CONFIG)\n- platform = system_config.get(\"enclosure\", {}).get(\"platform\")\n+def create_enclosure(platform):\n+ \"\"\"Create an enclosure based on the provided platform string.\n \n+ Arguments:\n+ platform (str): platform name string\n+\n+ Returns:\n+ Enclosure object\n+ \"\"\"\n if platform == \"mycroft_mark_1\":\n- LOG.debug(\"Creating Mark I Enclosure\")\n+ LOG.info(\"Creating Mark I Enclosure\")\n from mycroft.client.enclosure.mark1 import EnclosureMark1\n enclosure = EnclosureMark1()\n elif platform == \"mycroft_mark_2\":\n- LOG.debug(\"Creating Mark II Enclosure\")\n+ LOG.info(\"Creating Mark II Enclosure\")\n from mycroft.client.enclosure.mark2 import EnclosureMark2\n enclosure = EnclosureMark2()\n else:\n- LOG.debug(\"Creating generic enclosure, platform='{}'\".format(platform))\n+ LOG.info(\"Creating generic enclosure, platform='{}'\".format(platform))\n \n # TODO: Mechanism to load from elsewhere. E.g. read a script path from\n # the mycroft.conf, then load/launch that script.\n from mycroft.client.enclosure.generic import EnclosureGeneric\n enclosure = EnclosureGeneric()\n \n+ return enclosure\n+\n+\n+def main():\n+ \"\"\"Launch one of the available enclosure implementations.\n+\n+ This depends on the configured platform and can currently either be\n+ mycroft_mark_1 or mycroft_mark_2, if unconfigured a generic enclosure with\n+ only the GUI bus will be started.\n+ \"\"\"\n+ # Read the system configuration\n+ system_config = LocalConf(SYSTEM_CONFIG)\n+ platform = system_config.get(\"enclosure\", {}).get(\"platform\")\n+\n+ enclosure = create_enclosure(platform)\n if enclosure:\n try:\n LOG.debug(\"Enclosure started!\")\n- enclosure.run()\n+ reset_sigint_handler()\n+ create_daemon(enclosure.run)\n+ wait_for_exit_signal()\n except Exception as e:\n print(e)\n- finally:\n- sys.exit()\n else:\n- LOG.debug(\"No enclosure available for this hardware, running headless\")\n+ LOG.info(\"No enclosure available for this hardware, running headless\")\n \n \n if __name__ == \"__main__\":\n", "issue": "Skills and Enclosure background services fail to stop and are killed...\n## Be clear about the software, hardware and version you are running\r\n\r\nFor example: \r\n\r\nin CLI\r\n >> what version are you running \r\n >> I am running mycroft-core version 20 oh 2, release 0 \r\n >> You are on the latest version.\r\n\r\nOpensuse Leap 15.1\r\n## Try to provide steps that we can use to replicate the Issue\r\n\r\nFor example: \r\n\r\n1. CTRL+C in CLI\r\n2. Enter ./stop_mycroft.sh \r\n3. Skills and Enclosure services are eventually killed.\r\n4. 
Takes about 30 seconds total\r\n\r\n## Be as specific as possible about the expected condition, and the deviation from expected condition. \r\n\r\nuser@LinuxOS:~/mycroft-core> ./stop-mycroft.sh skills\r\nStopping skills (5579)...stopped.\r\nuser@LinuxOS:~/mycroft-core> ./stop-mycroft.sh enclosure\r\nStopping enclosure (5588)...failed to stop.\r\n Killing enclosure (5588)...killed.\r\nuser@LinuxOS:~/mycroft-core> ./stop-mycroft.sh\r\nStopping all mycroft-core services\r\nStopping messagebus.service (5576)...stopped.\r\nStopping audio (5582)...stopped.\r\nStopping speech (5585)...stopped.\r\n...\r\nuser@LinuxOS:~/mycroft-core> ./stop-mycroft.sh\r\nStopping all mycroft-core services\r\nStopping messagebus.service (18995)...stopped.\r\nStopping skills (18998)...failed to stop.\r\n Killing skills (18998)...killed.\r\nStopping audio (19001)...stopped.\r\nStopping speech (19004)...stopped.\r\nStopping enclosure (19007)...failed to stop.\r\n Killing enclosure (19007)...killed.\r\nuser@LinuxOS:~/mycroft-core> \r\n\r\n\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport sys\n\nfrom mycroft.util.log import LOG\nfrom mycroft.messagebus.client import MessageBusClient\nfrom mycroft.configuration import Configuration, LocalConf, SYSTEM_CONFIG\n\n\ndef main():\n # Read the system configuration\n system_config = LocalConf(SYSTEM_CONFIG)\n platform = system_config.get(\"enclosure\", {}).get(\"platform\")\n\n if platform == \"mycroft_mark_1\":\n LOG.debug(\"Creating Mark I Enclosure\")\n from mycroft.client.enclosure.mark1 import EnclosureMark1\n enclosure = EnclosureMark1()\n elif platform == \"mycroft_mark_2\":\n LOG.debug(\"Creating Mark II Enclosure\")\n from mycroft.client.enclosure.mark2 import EnclosureMark2\n enclosure = EnclosureMark2()\n else:\n LOG.debug(\"Creating generic enclosure, platform='{}'\".format(platform))\n\n # TODO: Mechanism to load from elsewhere. E.g. read a script path from\n # the mycroft.conf, then load/launch that script.\n from mycroft.client.enclosure.generic import EnclosureGeneric\n enclosure = EnclosureGeneric()\n\n if enclosure:\n try:\n LOG.debug(\"Enclosure started!\")\n enclosure.run()\n except Exception as e:\n print(e)\n finally:\n sys.exit()\n else:\n LOG.debug(\"No enclosure available for this hardware, running headless\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mycroft/client/enclosure/__main__.py"}]} | 1,501 | 711 |
gh_patches_debug_4275 | rasdani/github-patches | git_diff | comic__grand-challenge.org-37 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Country is not stored in db on signup
When a user signs up, the selected country is not stored in the db.
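The gap is in `SignupFormExtra.save()` (full file below): `country` is validated into `cleaned_data`, but the method only copies `institution` and `department` onto the profile. Condensed excerpt:

```python
def save(self):
    user = super(SignupFormExtra, self).save()
    user_profile = user.get_profile()
    user_profile.institution = self.cleaned_data['institution']
    user_profile.department = self.cleaned_data['department']
    # Missing: user_profile.country = self.cleaned_data['country']
    user_profile.save()
    return user
```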
</issue>
<code>
[start of django/profiles/forms.py]
1 from django import forms
2 from django.utils.translation import ugettext_lazy as _
3 from django_countries.countries import COUNTRIES
4
5 from userena.forms import SignupForm
6
7 class SignupFormExtra(SignupForm):
8 institution = forms.CharField(label=_(u'Institution'),
9 max_length = 100,
10 required = True,
11 help_text=_(u'Institution you are affiliated to.'))
12 department = forms.CharField(label=_(u'Department'),
13 max_length = 100,
14 required = True,
15 help_text=_(u'Department you represent.'))
16 country = forms.ChoiceField(label=_(u'Country'),
17 choices=COUNTRIES,
18 required = True)
19 website = forms.CharField(label=_(u'Website'),
20 max_length = 150,
21 required = False)
22 first_name = forms.CharField(label=_(u'First Name'),
23 max_length = 30,
24 required = True)
25 last_name = forms.CharField(label=_(u'Last Name'),
26 max_length = 30,
27 required = True)
28
29 def __init__(self, *args, **kw):
30 """ Bit of hackery to get the first and last name at the top of the form.
31 """
32 super(SignupFormExtra,self).__init__(*args,**kw)
33 # Put the first and last name at the top.
34 new_order = self.fields.keyOrder[:-2]
35 new_order.insert(0, 'first_name')
36 new_order.insert(1, 'last_name')
37 self.fields.keyOrder = new_order
38
39 def save(self):
40 user = super(SignupFormExtra,self).save()
41 user.first_name = self.cleaned_data['first_name']
42 user.last_name = self.cleaned_data['last_name']
43 user.save()
44 user_profile = user.get_profile()
45 user_profile.institution = self.cleaned_data['institution']
46 user_profile.department = self.cleaned_data['department']
47 user_profile.save()
48
49 return user
50
[end of django/profiles/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/profiles/forms.py b/django/profiles/forms.py
--- a/django/profiles/forms.py
+++ b/django/profiles/forms.py
@@ -44,6 +44,7 @@
user_profile = user.get_profile()
user_profile.institution = self.cleaned_data['institution']
user_profile.department = self.cleaned_data['department']
+ user_profile.country = self.cleaned_data['country']
user_profile.save()
return user
| {"golden_diff": "diff --git a/django/profiles/forms.py b/django/profiles/forms.py\n--- a/django/profiles/forms.py\n+++ b/django/profiles/forms.py\n@@ -44,6 +44,7 @@\n user_profile = user.get_profile()\n user_profile.institution = self.cleaned_data['institution']\n user_profile.department = self.cleaned_data['department']\n+ user_profile.country = self.cleaned_data['country']\n user_profile.save()\n \n return user\n", "issue": "Country is not stored in db on signup\nWhen a user signs up the country is not stored in the db\n\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_countries.countries import COUNTRIES\n\nfrom userena.forms import SignupForm\n\nclass SignupFormExtra(SignupForm):\n institution = forms.CharField(label=_(u'Institution'),\n max_length = 100,\n required = True,\n help_text=_(u'Institution you are affiliated to.'))\n department = forms.CharField(label=_(u'Department'),\n max_length = 100,\n required = True,\n help_text=_(u'Department you represent.'))\n country = forms.ChoiceField(label=_(u'Country'),\n choices=COUNTRIES,\n required = True)\n website = forms.CharField(label=_(u'Website'),\n max_length = 150,\n required = False)\n first_name = forms.CharField(label=_(u'First Name'),\n max_length = 30,\n required = True)\n last_name = forms.CharField(label=_(u'Last Name'),\n max_length = 30,\n required = True)\n\n def __init__(self, *args, **kw):\n \"\"\" Bit of hackery to get the first and last name at the top of the form.\n \"\"\"\n super(SignupFormExtra,self).__init__(*args,**kw)\n # Put the first and last name at the top.\n new_order = self.fields.keyOrder[:-2]\n new_order.insert(0, 'first_name')\n new_order.insert(1, 'last_name')\n self.fields.keyOrder = new_order\n\n def save(self):\n user = super(SignupFormExtra,self).save()\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n user_profile = user.get_profile()\n user_profile.institution = self.cleaned_data['institution']\n user_profile.department = self.cleaned_data['department']\n user_profile.save()\n\n return user\n", "path": "django/profiles/forms.py"}]} | 1,074 | 102 |
gh_patches_debug_26386 | rasdani/github-patches | git_diff | scverse__scanpy-2879 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
scanpy 1.10.0rc1 breaks anndata pre-release tests
### Please make sure these conditions are met
- [X] I have checked that this issue has not already been reported.
- [X] I have confirmed this bug exists on the latest version of scanpy.
- [X] (optional) I have confirmed this bug exists on the master branch of scanpy.
### What happened?
`@doctest_needs` decorator causes test failures on scanpy import in the anndata test suite
https://dev.azure.com/scverse/anndata/_build/results?buildId=5802&view=logs&jobId=0497d03e-5796-547f-cc56-989f8152a63c&j=0497d03e-5796-547f-cc56-989f8152a63c&t=ea3acdad-0250-5b8b-a1da-6cd02463cf17
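The fix that eventually landed (see the diff further down) reorders `scanpy/__init__.py` so that scanpy's own `_settings` import runs before the `anndata` re-exports; roughly:

```python
# Import order after the fix: scanpy's own settings first,
# third-party re-exports afterwards.
from ._settings import Verbosity, settings

set_figure_params = settings.set_figure_params

from anndata import AnnData, concat  # only then pull in anndata's API
```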
### Minimal code sample
```python
NA
```
### Error output
```pytb
else:
enum_member = enum_class._new_member_(enum_class, *args)
if not hasattr(enum_member, '_value_'):
if enum_class._member_type_ is object:
enum_member._value_ = value
else:
try:
enum_member._value_ = enum_class._member_type_(*args)
except Exception as exc:
new_exc = TypeError(
'_value_ not set in __new__, unable to create it'
)
new_exc.__cause__ = exc
> raise new_exc
E TypeError: _value_ not set in __new__, unable to create it
```
### Versions
<details>
```
See anndata test failure
```
</details>
</issue>
<code>
[start of scanpy/__init__.py]
1 """Single-Cell Analysis in Python."""
2 from __future__ import annotations
3
4 try: # See https://github.com/maresb/hatch-vcs-footgun-example
5 from setuptools_scm import get_version
6
7 __version__ = get_version(root="..", relative_to=__file__)
8 del get_version
9 except (ImportError, LookupError):
10 try:
11 from ._version import __version__
12 except ModuleNotFoundError:
13 raise RuntimeError(
14 "scanpy is not correctly installed. Please install it, e.g. with pip."
15 )
16
17 from ._utils import check_versions
18
19 check_versions()
20 del check_versions
21
22 # the actual API
23 # (start with settings as several tools are using it)
24 from anndata import (
25 AnnData,
26 concat,
27 read_csv,
28 read_excel,
29 read_h5ad,
30 read_hdf,
31 read_loom,
32 read_mtx,
33 read_text,
34 read_umi_tools,
35 )
36
37 from . import datasets, experimental, external, get, logging, metrics, queries
38 from . import plotting as pl
39 from . import preprocessing as pp
40 from . import tools as tl
41 from ._settings import Verbosity, settings
42 from .neighbors import Neighbors
43 from .readwrite import read, read_10x_h5, read_10x_mtx, read_visium, write
44
45 set_figure_params = settings.set_figure_params
46
47 # has to be done at the end, after everything has been imported
48 import sys
49
50 sys.modules.update({f"{__name__}.{m}": globals()[m] for m in ["tl", "pp", "pl"]})
51 from ._utils import annotate_doc_types
52
53 annotate_doc_types(sys.modules[__name__], "scanpy")
54 del sys, annotate_doc_types
55
56 __all__ = [
57 "__version__",
58 "AnnData",
59 "concat",
60 "read_csv",
61 "read_excel",
62 "read_h5ad",
63 "read_hdf",
64 "read_loom",
65 "read_mtx",
66 "read_text",
67 "read_umi_tools",
68 "read",
69 "read_10x_h5",
70 "read_10x_mtx",
71 "read_visium",
72 "write",
73 "datasets",
74 "experimental",
75 "external",
76 "get",
77 "logging",
78 "metrics",
79 "queries",
80 "pl",
81 "pp",
82 "tl",
83 "Verbosity",
84 "settings",
85 "Neighbors",
86 "set_figure_params",
87 ]
88
[end of scanpy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scanpy/__init__.py b/scanpy/__init__.py
--- a/scanpy/__init__.py
+++ b/scanpy/__init__.py
@@ -1,6 +1,8 @@
"""Single-Cell Analysis in Python."""
from __future__ import annotations
+import sys
+
try: # See https://github.com/maresb/hatch-vcs-footgun-example
from setuptools_scm import get_version
@@ -21,6 +23,11 @@
# the actual API
# (start with settings as several tools are using it)
+
+from ._settings import Verbosity, settings
+
+set_figure_params = settings.set_figure_params
+
from anndata import (
AnnData,
concat,
@@ -38,15 +45,10 @@
from . import plotting as pl
from . import preprocessing as pp
from . import tools as tl
-from ._settings import Verbosity, settings
from .neighbors import Neighbors
from .readwrite import read, read_10x_h5, read_10x_mtx, read_visium, write
-set_figure_params = settings.set_figure_params
-
# has to be done at the end, after everything has been imported
-import sys
-
sys.modules.update({f"{__name__}.{m}": globals()[m] for m in ["tl", "pp", "pl"]})
from ._utils import annotate_doc_types
| {"golden_diff": "diff --git a/scanpy/__init__.py b/scanpy/__init__.py\n--- a/scanpy/__init__.py\n+++ b/scanpy/__init__.py\n@@ -1,6 +1,8 @@\n \"\"\"Single-Cell Analysis in Python.\"\"\"\n from __future__ import annotations\n \n+import sys\n+\n try: # See https://github.com/maresb/hatch-vcs-footgun-example\n from setuptools_scm import get_version\n \n@@ -21,6 +23,11 @@\n \n # the actual API\n # (start with settings as several tools are using it)\n+\n+from ._settings import Verbosity, settings\n+\n+set_figure_params = settings.set_figure_params\n+\n from anndata import (\n AnnData,\n concat,\n@@ -38,15 +45,10 @@\n from . import plotting as pl\n from . import preprocessing as pp\n from . import tools as tl\n-from ._settings import Verbosity, settings\n from .neighbors import Neighbors\n from .readwrite import read, read_10x_h5, read_10x_mtx, read_visium, write\n \n-set_figure_params = settings.set_figure_params\n-\n # has to be done at the end, after everything has been imported\n-import sys\n-\n sys.modules.update({f\"{__name__}.{m}\": globals()[m] for m in [\"tl\", \"pp\", \"pl\"]})\n from ._utils import annotate_doc_types\n", "issue": "scanpy 1.10.0rc1 breaks anndata pre-release tests\n### Please make sure these conditions are met\n\n- [X] I have checked that this issue has not already been reported.\n- [X] I have confirmed this bug exists on the latest version of scanpy.\n- [X] (optional) I have confirmed this bug exists on the master branch of scanpy.\n\n### What happened?\n\n`@doctest_needs` decorator causes test failures on scanpy import in anndata test suite\r\n\r\nhttps://dev.azure.com/scverse/anndata/_build/results?buildId=5802&view=logs&jobId=0497d03e-5796-547f-cc56-989f8152a63c&j=0497d03e-5796-547f-cc56-989f8152a63c&t=ea3acdad-0250-5b8b-a1da-6cd02463cf17\r\n\r\n\n\n### Minimal code sample\n\n```python\nNA\n```\n\n\n### Error output\n\n```pytb\nelse:\r\n enum_member = enum_class._new_member_(enum_class, *args)\r\n if not hasattr(enum_member, '_value_'):\r\n if enum_class._member_type_ is object:\r\n enum_member._value_ = value\r\n else:\r\n try:\r\n enum_member._value_ = enum_class._member_type_(*args)\r\n except Exception as exc:\r\n new_exc = TypeError(\r\n '_value_ not set in __new__, unable to create it'\r\n )\r\n new_exc.__cause__ = exc\r\n> raise new_exc\r\nE TypeError: _value_ not set in __new__, unable to create it\n```\n\n\n### Versions\n\n<details>\r\n\r\n```\r\nSee anndata test failure\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "\"\"\"Single-Cell Analysis in Python.\"\"\"\nfrom __future__ import annotations\n\ntry: # See https://github.com/maresb/hatch-vcs-footgun-example\n from setuptools_scm import get_version\n\n __version__ = get_version(root=\"..\", relative_to=__file__)\n del get_version\nexcept (ImportError, LookupError):\n try:\n from ._version import __version__\n except ModuleNotFoundError:\n raise RuntimeError(\n \"scanpy is not correctly installed. Please install it, e.g. with pip.\"\n )\n\nfrom ._utils import check_versions\n\ncheck_versions()\ndel check_versions\n\n# the actual API\n# (start with settings as several tools are using it)\nfrom anndata import (\n AnnData,\n concat,\n read_csv,\n read_excel,\n read_h5ad,\n read_hdf,\n read_loom,\n read_mtx,\n read_text,\n read_umi_tools,\n)\n\nfrom . import datasets, experimental, external, get, logging, metrics, queries\nfrom . import plotting as pl\nfrom . import preprocessing as pp\nfrom . 
import tools as tl\nfrom ._settings import Verbosity, settings\nfrom .neighbors import Neighbors\nfrom .readwrite import read, read_10x_h5, read_10x_mtx, read_visium, write\n\nset_figure_params = settings.set_figure_params\n\n# has to be done at the end, after everything has been imported\nimport sys\n\nsys.modules.update({f\"{__name__}.{m}\": globals()[m] for m in [\"tl\", \"pp\", \"pl\"]})\nfrom ._utils import annotate_doc_types\n\nannotate_doc_types(sys.modules[__name__], \"scanpy\")\ndel sys, annotate_doc_types\n\n__all__ = [\n \"__version__\",\n \"AnnData\",\n \"concat\",\n \"read_csv\",\n \"read_excel\",\n \"read_h5ad\",\n \"read_hdf\",\n \"read_loom\",\n \"read_mtx\",\n \"read_text\",\n \"read_umi_tools\",\n \"read\",\n \"read_10x_h5\",\n \"read_10x_mtx\",\n \"read_visium\",\n \"write\",\n \"datasets\",\n \"experimental\",\n \"external\",\n \"get\",\n \"logging\",\n \"metrics\",\n \"queries\",\n \"pl\",\n \"pp\",\n \"tl\",\n \"Verbosity\",\n \"settings\",\n \"Neighbors\",\n \"set_figure_params\",\n]\n", "path": "scanpy/__init__.py"}]} | 1,653 | 312 |
gh_patches_debug_64222 | rasdani/github-patches | git_diff | scrapy__scrapy-1313 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HTTP_PROXY variable with username and empty password not supported
Scrapy doesn't support proxy authentication with an empty password when the HTTP_PROXY environment variable is used to supply the proxy.
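Concretely, a proxy URL with a user but an empty password, such as `http://user:@proxy.example:3128`, parses to `password == ''`, and the middleware's `if user and password:` guard (see below) then never attaches the `Proxy-Authorization` header. A quick sketch:

```python
try:
    from urllib2 import _parse_proxy        # Python 2
except ImportError:
    from urllib.request import _parse_proxy  # Python 3

proxy_type, user, password, hostport = _parse_proxy("http://user:@proxy.example:3128")
assert (user, password) == ("user", "")
# `user and password` is falsy for an empty password, so the credentials
# header is silently skipped even though a username was supplied.
```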
</issue>
<code>
[start of scrapy/downloadermiddlewares/httpproxy.py]
1 import base64
2 from six.moves.urllib.request import getproxies, proxy_bypass
3 from six.moves.urllib.parse import unquote
4 try:
5 from urllib2 import _parse_proxy
6 except ImportError:
7 from urllib.request import _parse_proxy
8 from six.moves.urllib.parse import urlunparse
9
10 from scrapy.utils.httpobj import urlparse_cached
11 from scrapy.exceptions import NotConfigured
12
13
14 class HttpProxyMiddleware(object):
15
16 def __init__(self):
17 self.proxies = {}
18 for type, url in getproxies().items():
19 self.proxies[type] = self._get_proxy(url, type)
20
21 if not self.proxies:
22 raise NotConfigured
23
24 def _get_proxy(self, url, orig_type):
25 proxy_type, user, password, hostport = _parse_proxy(url)
26 proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
27
28 if user and password:
29 user_pass = '%s:%s' % (unquote(user), unquote(password))
30 creds = base64.b64encode(user_pass).strip()
31 else:
32 creds = None
33
34 return creds, proxy_url
35
36 def process_request(self, request, spider):
37         # ignore if proxy is already set
38 if 'proxy' in request.meta:
39 return
40
41 parsed = urlparse_cached(request)
42 scheme = parsed.scheme
43
44 # 'no_proxy' is only supported by http schemes
45 if scheme in ('http', 'https') and proxy_bypass(parsed.hostname):
46 return
47
48 if scheme in self.proxies:
49 self._set_proxy(request, scheme)
50
51 def _set_proxy(self, request, scheme):
52 creds, proxy = self.proxies[scheme]
53 request.meta['proxy'] = proxy
54 if creds:
55 request.headers['Proxy-Authorization'] = 'Basic ' + creds
56
[end of scrapy/downloadermiddlewares/httpproxy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/downloadermiddlewares/httpproxy.py b/scrapy/downloadermiddlewares/httpproxy.py
--- a/scrapy/downloadermiddlewares/httpproxy.py
+++ b/scrapy/downloadermiddlewares/httpproxy.py
@@ -25,7 +25,7 @@
proxy_type, user, password, hostport = _parse_proxy(url)
proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
- if user and password:
+ if user:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
else:
| {"golden_diff": "diff --git a/scrapy/downloadermiddlewares/httpproxy.py b/scrapy/downloadermiddlewares/httpproxy.py\n--- a/scrapy/downloadermiddlewares/httpproxy.py\n+++ b/scrapy/downloadermiddlewares/httpproxy.py\n@@ -25,7 +25,7 @@\n proxy_type, user, password, hostport = _parse_proxy(url)\n proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))\n \n- if user and password:\n+ if user:\n user_pass = '%s:%s' % (unquote(user), unquote(password))\n creds = base64.b64encode(user_pass).strip()\n else:\n", "issue": "HTTP_PROXY variable with username and empty password not supported\nScrapy doesn't support proxy authentication when the password is empty when using the HTTP_PROXY environment variable to supply the proxy argument.\n\n", "before_files": [{"content": "import base64\nfrom six.moves.urllib.request import getproxies, proxy_bypass\nfrom six.moves.urllib.parse import unquote\ntry:\n from urllib2 import _parse_proxy\nexcept ImportError:\n from urllib.request import _parse_proxy\nfrom six.moves.urllib.parse import urlunparse\n\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.exceptions import NotConfigured\n\n\nclass HttpProxyMiddleware(object):\n\n def __init__(self):\n self.proxies = {}\n for type, url in getproxies().items():\n self.proxies[type] = self._get_proxy(url, type)\n\n if not self.proxies:\n raise NotConfigured\n\n def _get_proxy(self, url, orig_type):\n proxy_type, user, password, hostport = _parse_proxy(url)\n proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))\n\n if user and password:\n user_pass = '%s:%s' % (unquote(user), unquote(password))\n creds = base64.b64encode(user_pass).strip()\n else:\n creds = None\n\n return creds, proxy_url\n\n def process_request(self, request, spider):\n # ignore if proxy is already seted\n if 'proxy' in request.meta:\n return\n\n parsed = urlparse_cached(request)\n scheme = parsed.scheme\n\n # 'no_proxy' is only supported by http schemes\n if scheme in ('http', 'https') and proxy_bypass(parsed.hostname):\n return\n\n if scheme in self.proxies:\n self._set_proxy(request, scheme)\n\n def _set_proxy(self, request, scheme):\n creds, proxy = self.proxies[scheme]\n request.meta['proxy'] = proxy\n if creds:\n request.headers['Proxy-Authorization'] = 'Basic ' + creds\n", "path": "scrapy/downloadermiddlewares/httpproxy.py"}]} | 1,089 | 157 |
gh_patches_debug_21249 | rasdani/github-patches | git_diff | statsmodels__statsmodels-3439 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API/DOCS: newer correlation tools are missing in api and docs
`stats.api` and http://www.statsmodels.org/dev/stats.html#moment-helpers
only show the original functions, not those added by Kerby
(I'm trying to figure out where we should put new correlation and covariance functions, hypothesis tests, robust, regularized covariance and correlation.)
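The practical effect: the newer estimators are importable from the submodule but invisible in the documented namespace. A short sketch (names as added in the diff below):

```python
# Reachable today only via the submodule:
from statsmodels.stats.correlation_tools import (
    corr_nearest_factor, corr_thresholded, cov_nearest_factor_homog)

# The public namespace re-exports just the original trio:
import statsmodels.stats.api as sms
assert hasattr(sms, "corr_nearest")             # already exported
assert not hasattr(sms, "corr_nearest_factor")  # missing until api.py is extended
```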
</issue>
<code>
[start of statsmodels/stats/api.py]
1 # pylint: disable=W0611
2 from . import diagnostic
3 from .diagnostic import (
4 acorr_ljungbox, acorr_breusch_godfrey,
5 CompareCox, compare_cox, CompareJ, compare_j,
6 HetGoldfeldQuandt, het_goldfeldquandt,
7 het_breuschpagan, het_white, het_arch,
8 linear_harvey_collier, linear_rainbow, linear_lm,
9 breaks_cusumolsresid, breaks_hansen, recursive_olsresiduals,
10 unitroot_adf,
11 normal_ad, lilliefors,
12 # deprecated because of misspelling:
13 lillifors, het_breushpagan, acorr_breush_godfrey
14 )
15
16 from . import multicomp
17 from .multitest import (multipletests, fdrcorrection, fdrcorrection_twostage)
18 from .multicomp import tukeyhsd
19 from . import gof
20 from .gof import (powerdiscrepancy, gof_chisquare_discrete,
21 chisquare_effectsize)
22 from . import stattools
23 from .stattools import durbin_watson, omni_normtest, jarque_bera
24
25 from . import sandwich_covariance
26 from .sandwich_covariance import (
27 cov_cluster, cov_cluster_2groups, cov_nw_panel,
28 cov_hac, cov_white_simple,
29 cov_hc0, cov_hc1, cov_hc2, cov_hc3,
30 se_cov
31 )
32
33 from .weightstats import (DescrStatsW, CompareMeans, ttest_ind, ttost_ind,
34 ttost_paired, ztest, ztost, zconfint)
35
36 from .proportion import (binom_test_reject_interval, binom_test,
37 binom_tost, binom_tost_reject_interval,
38 power_binom_tost, power_ztost_prop,
39 proportion_confint, proportion_effectsize,
40 proportions_chisquare, proportions_chisquare_allpairs,
41 proportions_chisquare_pairscontrol, proportions_ztest,
42 proportions_ztost)
43
44 from .power import (TTestPower, TTestIndPower, GofChisquarePower,
45 NormalIndPower, FTestAnovaPower, FTestPower,
46 tt_solve_power, tt_ind_solve_power, zt_ind_solve_power)
47
48 from .descriptivestats import Describe
49
50 from .anova import anova_lm
51
52 from . import moment_helpers
53 from .correlation_tools import corr_nearest, corr_clipped, cov_nearest
54
55 from statsmodels.sandbox.stats.runs import (Runs, runstest_1samp, runstest_2samp)
56
57 from statsmodels.stats.contingency_tables import (mcnemar, cochrans_q,
58 SquareTable,
59 Table2x2,
60 Table,
61 StratifiedTable)
62
[end of statsmodels/stats/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/statsmodels/stats/api.py b/statsmodels/stats/api.py
--- a/statsmodels/stats/api.py
+++ b/statsmodels/stats/api.py
@@ -39,7 +39,7 @@
proportion_confint, proportion_effectsize,
proportions_chisquare, proportions_chisquare_allpairs,
proportions_chisquare_pairscontrol, proportions_ztest,
- proportions_ztost)
+ proportions_ztost, multinomial_proportions_confint)
from .power import (TTestPower, TTestIndPower, GofChisquarePower,
NormalIndPower, FTestAnovaPower, FTestPower,
@@ -50,7 +50,9 @@
from .anova import anova_lm
from . import moment_helpers
-from .correlation_tools import corr_nearest, corr_clipped, cov_nearest
+from .correlation_tools import (corr_clipped, corr_nearest,
+ corr_nearest_factor, corr_thresholded, cov_nearest,
+ cov_nearest_factor_homog, FactoredPSDMatrix)
from statsmodels.sandbox.stats.runs import (Runs, runstest_1samp, runstest_2samp)
| {"golden_diff": "diff --git a/statsmodels/stats/api.py b/statsmodels/stats/api.py\n--- a/statsmodels/stats/api.py\n+++ b/statsmodels/stats/api.py\n@@ -39,7 +39,7 @@\n proportion_confint, proportion_effectsize,\n proportions_chisquare, proportions_chisquare_allpairs,\n proportions_chisquare_pairscontrol, proportions_ztest,\n- proportions_ztost)\n+ proportions_ztost, multinomial_proportions_confint)\n \n from .power import (TTestPower, TTestIndPower, GofChisquarePower,\n NormalIndPower, FTestAnovaPower, FTestPower,\n@@ -50,7 +50,9 @@\n from .anova import anova_lm\n \n from . import moment_helpers\n-from .correlation_tools import corr_nearest, corr_clipped, cov_nearest\n+from .correlation_tools import (corr_clipped, corr_nearest,\n+ corr_nearest_factor, corr_thresholded, cov_nearest,\n+ cov_nearest_factor_homog, FactoredPSDMatrix)\n \n from statsmodels.sandbox.stats.runs import (Runs, runstest_1samp, runstest_2samp)\n", "issue": "API/DOCS: newer correlation tools are missing in api and docs\n`stats.api` and http://www.statsmodels.org/dev/stats.html#moment-helpers\nonly shows the original functions, not those added by Kerby\n\n(I'm trying to figure out where we should put new correlation and covariance function, hypothesis tests, robust, regularized covariance and correlation.)\n\n", "before_files": [{"content": "# pylint: disable=W0611\nfrom . import diagnostic\nfrom .diagnostic import (\n acorr_ljungbox, acorr_breusch_godfrey,\n CompareCox, compare_cox, CompareJ, compare_j,\n HetGoldfeldQuandt, het_goldfeldquandt,\n het_breuschpagan, het_white, het_arch,\n linear_harvey_collier, linear_rainbow, linear_lm,\n breaks_cusumolsresid, breaks_hansen, recursive_olsresiduals,\n unitroot_adf,\n normal_ad, lilliefors,\n # deprecated because of misspelling:\n lillifors, het_breushpagan, acorr_breush_godfrey\n )\n\nfrom . import multicomp\nfrom .multitest import (multipletests, fdrcorrection, fdrcorrection_twostage)\nfrom .multicomp import tukeyhsd\nfrom . import gof\nfrom .gof import (powerdiscrepancy, gof_chisquare_discrete,\n chisquare_effectsize)\nfrom . import stattools\nfrom .stattools import durbin_watson, omni_normtest, jarque_bera\n\nfrom . import sandwich_covariance\nfrom .sandwich_covariance import (\n cov_cluster, cov_cluster_2groups, cov_nw_panel,\n cov_hac, cov_white_simple,\n cov_hc0, cov_hc1, cov_hc2, cov_hc3,\n se_cov\n )\n\nfrom .weightstats import (DescrStatsW, CompareMeans, ttest_ind, ttost_ind,\n ttost_paired, ztest, ztost, zconfint)\n\nfrom .proportion import (binom_test_reject_interval, binom_test,\n binom_tost, binom_tost_reject_interval,\n power_binom_tost, power_ztost_prop,\n proportion_confint, proportion_effectsize,\n proportions_chisquare, proportions_chisquare_allpairs,\n proportions_chisquare_pairscontrol, proportions_ztest,\n proportions_ztost)\n\nfrom .power import (TTestPower, TTestIndPower, GofChisquarePower,\n NormalIndPower, FTestAnovaPower, FTestPower,\n tt_solve_power, tt_ind_solve_power, zt_ind_solve_power)\n\nfrom .descriptivestats import Describe\n\nfrom .anova import anova_lm\n\nfrom . import moment_helpers\nfrom .correlation_tools import corr_nearest, corr_clipped, cov_nearest\n\nfrom statsmodels.sandbox.stats.runs import (Runs, runstest_1samp, runstest_2samp)\n\nfrom statsmodels.stats.contingency_tables import (mcnemar, cochrans_q,\n SquareTable,\n Table2x2,\n Table,\n StratifiedTable)\n", "path": "statsmodels/stats/api.py"}]} | 1,349 | 255 |
gh_patches_debug_12624 | rasdani/github-patches | git_diff | secdev__scapy-2631 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use nextproto property instead of nextprotocol
This is just a checklist to guide you. You can remove it safely.
**Checklist:**
- [x] If you are new to Scapy: I have checked <https://github.com/secdev/scapy/blob/master/CONTRIBUTING.md> (esp. section submitting-pull-requests)
- [ ] I squashed commits belonging together
- [ ] I added unit tests or explained why they are not relevant
- [ ] I executed the regression tests for Python2 and Python3 (using `tox`, or `cd test && ./run_tests_py2` and `cd test && ./run_tests_py3`)
- [ ] If the PR is still not finished, please create a [Draft Pull Request](https://github.blog/2019-02-14-introducing-draft-pull-requests/)
> brief description what this PR will do, e.g. fixes broken dissection of XXX
Fix the wrong property name used in the `bind_layers` calls for the NSH protocol. The NSH class defines `nextproto` as its next-protocol field.
I changed `nextprotocol` to `nextproto` in the `bind_layers` calls.
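Before/after sketch, matching the field name defined on the `NSH` class below (the misspelled key does not correspond to any `NSH` field, so the bindings do not behave as intended):

```python
# Broken: 'nextprotocol' is not a field of NSH.
bind_layers(NSH, IP, {'nextprotocol': 1}, nextprotocol=1)

# Fixed: use the ByteEnumField actually defined on NSH.
bind_layers(NSH, IP, nextproto=1)
```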
</issue>
<code>
[start of scapy/contrib/nsh.py]
1 # This file is part of Scapy
2 # Scapy is free software: you can redistribute it and/or modify
3 # it under the terms of the GNU General Public License as published by
4 # the Free Software Foundation, either version 2 of the License, or
5 # any later version.
6 #
7 # Scapy is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
11 #
12 # You should have received a copy of the GNU General Public License
13 # along with Scapy. If not, see <http://www.gnu.org/licenses/>.
14
15 # scapy.contrib.description = Network Services Headers (NSH)
16 # scapy.contrib.status = loads
17
18 from scapy.all import bind_layers
19 from scapy.fields import BitField, ByteField, ByteEnumField, BitEnumField, \
20 ShortField, X3BytesField, XIntField, XStrFixedLenField, \
21 ConditionalField, PacketListField, BitFieldLenField
22 from scapy.layers.inet import Ether, IP
23 from scapy.layers.inet6 import IPv6
24 from scapy.layers.vxlan import VXLAN
25 from scapy.packet import Packet
26 from scapy.layers.l2 import GRE
27
28 from scapy.contrib.mpls import MPLS
29
30 #
31 # NSH Support
32 # https://www.rfc-editor.org/rfc/rfc8300.txt January 2018
33 #
34
35
36 class NSHTLV(Packet):
37 "NSH MD-type 2 - Variable Length Context Headers"
38 name = "NSHTLV"
39 fields_desc = [
40 ShortField('class', 0),
41 BitField('type', 0, 8),
42 BitField('reserved', 0, 1),
43 BitField('length', 0, 7),
44 PacketListField('metadata', None, XIntField, count_from='length')
45 ]
46
47
48 class NSH(Packet):
49 """Network Service Header.
50 NSH MD-type 1 if there is no ContextHeaders"""
51 name = "NSH"
52
53 fields_desc = [
54 BitField('ver', 0, 2),
55 BitField('oam', 0, 1),
56 BitField('unused1', 0, 1),
57 BitField('ttl', 63, 6),
58 BitFieldLenField('length', None, 6,
59 count_of='vlch',
60 adjust=lambda pkt, x: 6 if pkt.mdtype == 1
61 else x + 2),
62 BitField('unused2', 0, 4),
63 BitEnumField('mdtype', 1, 4, {0: 'Reserved MDType',
64 1: 'Fixed Length',
65 2: 'Variable Length',
66 0xF: 'Experimental MDType'}),
67 ByteEnumField('nextproto', 3, {1: 'IPv4',
68 2: 'IPv6',
69 3: 'Ethernet',
70 4: 'NSH',
71 5: 'MPLS',
72 0xFE: 'Experiment 1',
73 0xFF: 'Experiment 2'}),
74 X3BytesField('spi', 0),
75 ByteField('si', 0xFF),
76 ConditionalField(XStrFixedLenField("context_header", "", 16),
77 lambda pkt: pkt.mdtype == 1),
78 ConditionalField(PacketListField("vlch", None, NSHTLV,
79 count_from="length"),
80 lambda pkt: pkt.mdtype == 2)
81 ]
82
83 def mysummary(self):
84 return self.sprintf("SPI: %spi% - SI: %si%")
85
86
87 bind_layers(Ether, NSH, {'type': 0x894F}, type=0x894F)
88 bind_layers(VXLAN, NSH, {'flags': 0xC, 'nextprotocol': 4}, nextprotocol=4)
89 bind_layers(GRE, NSH, {'proto': 0x894F}, proto=0x894F)
90
91 bind_layers(NSH, IP, {'nextprotocol': 1}, nextprotocol=1)
92 bind_layers(NSH, IPv6, {'nextprotocol': 2}, nextprotocol=2)
93 bind_layers(NSH, Ether, {'nextprotocol': 3}, nextprotocol=3)
94 bind_layers(NSH, NSH, {'nextprotocol': 4}, nextprotocol=4)
95 bind_layers(NSH, MPLS, {'nextprotocol': 5}, nextprotocol=5)
96
[end of scapy/contrib/nsh.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scapy/contrib/nsh.py b/scapy/contrib/nsh.py
--- a/scapy/contrib/nsh.py
+++ b/scapy/contrib/nsh.py
@@ -85,11 +85,11 @@
bind_layers(Ether, NSH, {'type': 0x894F}, type=0x894F)
-bind_layers(VXLAN, NSH, {'flags': 0xC, 'nextprotocol': 4}, nextprotocol=4)
+bind_layers(VXLAN, NSH, {'flags': 0xC, 'nextproto': 4}, nextproto=4)
bind_layers(GRE, NSH, {'proto': 0x894F}, proto=0x894F)
-bind_layers(NSH, IP, {'nextprotocol': 1}, nextprotocol=1)
-bind_layers(NSH, IPv6, {'nextprotocol': 2}, nextprotocol=2)
-bind_layers(NSH, Ether, {'nextprotocol': 3}, nextprotocol=3)
-bind_layers(NSH, NSH, {'nextprotocol': 4}, nextprotocol=4)
-bind_layers(NSH, MPLS, {'nextprotocol': 5}, nextprotocol=5)
+bind_layers(NSH, IP, nextproto=1)
+bind_layers(NSH, IPv6, nextproto=2)
+bind_layers(NSH, Ether, nextproto=3)
+bind_layers(NSH, NSH, nextproto=4)
+bind_layers(NSH, MPLS, nextproto=5)
| {"golden_diff": "diff --git a/scapy/contrib/nsh.py b/scapy/contrib/nsh.py\n--- a/scapy/contrib/nsh.py\n+++ b/scapy/contrib/nsh.py\n@@ -85,11 +85,11 @@\n \n \n bind_layers(Ether, NSH, {'type': 0x894F}, type=0x894F)\n-bind_layers(VXLAN, NSH, {'flags': 0xC, 'nextprotocol': 4}, nextprotocol=4)\n+bind_layers(VXLAN, NSH, {'flags': 0xC, 'nextproto': 4}, nextproto=4)\n bind_layers(GRE, NSH, {'proto': 0x894F}, proto=0x894F)\n \n-bind_layers(NSH, IP, {'nextprotocol': 1}, nextprotocol=1)\n-bind_layers(NSH, IPv6, {'nextprotocol': 2}, nextprotocol=2)\n-bind_layers(NSH, Ether, {'nextprotocol': 3}, nextprotocol=3)\n-bind_layers(NSH, NSH, {'nextprotocol': 4}, nextprotocol=4)\n-bind_layers(NSH, MPLS, {'nextprotocol': 5}, nextprotocol=5)\n+bind_layers(NSH, IP, nextproto=1)\n+bind_layers(NSH, IPv6, nextproto=2)\n+bind_layers(NSH, Ether, nextproto=3)\n+bind_layers(NSH, NSH, nextproto=4)\n+bind_layers(NSH, MPLS, nextproto=5)\n", "issue": "Use nextproto property instead of nextprotocol\nThis is just a checklist to guide you. You can remove it safely.\r\n\r\n**Checklist:**\r\n\r\n- [x ] If you are new to Scapy: I have checked <https://github.com/secdev/scapy/blob/master/CONTRIBUTING.md> (esp. section submitting-pull-requests)\r\n- [ ] I squashed commits belonging together\r\n- [ ] I added unit tests or explained why they are not relevant\r\n- [ ] I executed the regression tests for Python2 and Python3 (using `tox` or, `cd test && ./run_tests_py2, cd test && ./run_tests_py3`)\r\n- [ ] If the PR is still not finished, please create a [Draft Pull Request](https://github.blog/2019-02-14-introducing-draft-pull-requests/)\r\n\r\n> brief description what this PR will do, e.g. fixes broken dissection of XXX\r\nFix wrong property in `bind_layers` function of NSH protocol. In the NSH class, it defines `nextproto` for next protocol property. \r\n\r\nI changed from `nextprotocol` to `nextproto` in `bind_layers` functions.\r\n\r\n> if required - short explanation why you fixed something in a way that may look more complicated as it actually is\r\n\r\n> if required - outline impacts on other parts of the library\r\n\n", "before_files": [{"content": "# This file is part of Scapy\n# Scapy is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# any later version.\n#\n# Scapy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Scapy. 
If not, see <http://www.gnu.org/licenses/>.\n\n# scapy.contrib.description = Network Services Headers (NSH)\n# scapy.contrib.status = loads\n\nfrom scapy.all import bind_layers\nfrom scapy.fields import BitField, ByteField, ByteEnumField, BitEnumField, \\\n ShortField, X3BytesField, XIntField, XStrFixedLenField, \\\n ConditionalField, PacketListField, BitFieldLenField\nfrom scapy.layers.inet import Ether, IP\nfrom scapy.layers.inet6 import IPv6\nfrom scapy.layers.vxlan import VXLAN\nfrom scapy.packet import Packet\nfrom scapy.layers.l2 import GRE\n\nfrom scapy.contrib.mpls import MPLS\n\n#\n# NSH Support\n# https://www.rfc-editor.org/rfc/rfc8300.txt January 2018\n#\n\n\nclass NSHTLV(Packet):\n \"NSH MD-type 2 - Variable Length Context Headers\"\n name = \"NSHTLV\"\n fields_desc = [\n ShortField('class', 0),\n BitField('type', 0, 8),\n BitField('reserved', 0, 1),\n BitField('length', 0, 7),\n PacketListField('metadata', None, XIntField, count_from='length')\n ]\n\n\nclass NSH(Packet):\n \"\"\"Network Service Header.\n NSH MD-type 1 if there is no ContextHeaders\"\"\"\n name = \"NSH\"\n\n fields_desc = [\n BitField('ver', 0, 2),\n BitField('oam', 0, 1),\n BitField('unused1', 0, 1),\n BitField('ttl', 63, 6),\n BitFieldLenField('length', None, 6,\n count_of='vlch',\n adjust=lambda pkt, x: 6 if pkt.mdtype == 1\n else x + 2),\n BitField('unused2', 0, 4),\n BitEnumField('mdtype', 1, 4, {0: 'Reserved MDType',\n 1: 'Fixed Length',\n 2: 'Variable Length',\n 0xF: 'Experimental MDType'}),\n ByteEnumField('nextproto', 3, {1: 'IPv4',\n 2: 'IPv6',\n 3: 'Ethernet',\n 4: 'NSH',\n 5: 'MPLS',\n 0xFE: 'Experiment 1',\n 0xFF: 'Experiment 2'}),\n X3BytesField('spi', 0),\n ByteField('si', 0xFF),\n ConditionalField(XStrFixedLenField(\"context_header\", \"\", 16),\n lambda pkt: pkt.mdtype == 1),\n ConditionalField(PacketListField(\"vlch\", None, NSHTLV,\n count_from=\"length\"),\n lambda pkt: pkt.mdtype == 2)\n ]\n\n def mysummary(self):\n return self.sprintf(\"SPI: %spi% - SI: %si%\")\n\n\nbind_layers(Ether, NSH, {'type': 0x894F}, type=0x894F)\nbind_layers(VXLAN, NSH, {'flags': 0xC, 'nextprotocol': 4}, nextprotocol=4)\nbind_layers(GRE, NSH, {'proto': 0x894F}, proto=0x894F)\n\nbind_layers(NSH, IP, {'nextprotocol': 1}, nextprotocol=1)\nbind_layers(NSH, IPv6, {'nextprotocol': 2}, nextprotocol=2)\nbind_layers(NSH, Ether, {'nextprotocol': 3}, nextprotocol=3)\nbind_layers(NSH, NSH, {'nextprotocol': 4}, nextprotocol=4)\nbind_layers(NSH, MPLS, {'nextprotocol': 5}, nextprotocol=5)\n", "path": "scapy/contrib/nsh.py"}]} | 1,995 | 336 |
gh_patches_debug_11202 | rasdani/github-patches | git_diff | fossasia__open-event-server-2181 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move Payment Gateways to own subtab
On `admin/settings/`, add a "Payment Gateways" subtab and move the PayPal and Stripe settings there.


</issue>
<code>
[start of app/settings/__init__.py]
1 import stripe
2 from flask import current_app
3 from sqlalchemy import desc
4 from app.models.setting import Setting
5 from app.models.fees import TicketFees
6
7
8 def get_settings():
9 """
10 Use this to get latest system settings
11 """
12 if 'custom_settings' in current_app.config:
13 return current_app.config['custom_settings']
14 s = Setting.query.order_by(desc(Setting.id)).first()
15 if s is None:
16 set_settings(secret='super secret key')
17 else:
18 current_app.config['custom_settings'] = make_dict(s)
19 return current_app.config['custom_settings']
20
21
22 def set_settings(**kwargs):
23 """
24 Update system settings
25 """
26
27 if 'service_fee' in kwargs:
28 ticket_service_fees = kwargs.get('service_fee')
29 ticket_maximum_fees = kwargs.get('maximum_fee')
30 from app.helpers.data_getter import DataGetter
31 from app.helpers.data import save_to_db
32 currencies = DataGetter.get_payment_currencies()
33 for i, currency in enumerate(currencies):
34 currency = currency.split(' ')[0]
35 ticket_fee = TicketFees(currency=currency,
36 service_fee=ticket_service_fees[i],
37 maximum_fee=ticket_maximum_fees[i])
38 save_to_db(ticket_fee, "Ticket Fees settings saved")
39 else:
40 setting = Setting(**kwargs)
41 from app.helpers.data import save_to_db
42 save_to_db(setting, 'Setting saved')
43 current_app.secret_key = setting.secret
44 stripe.api_key = setting.stripe_secret_key
45 current_app.config['custom_settings'] = make_dict(setting)
46
47
48 def make_dict(s):
49 arguments = {}
50 for name, column in s.__mapper__.columns.items():
51 if not (column.primary_key or column.unique):
52 arguments[name] = getattr(s, name)
53 return arguments
54
[end of app/settings/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/settings/__init__.py b/app/settings/__init__.py
--- a/app/settings/__init__.py
+++ b/app/settings/__init__.py
@@ -30,7 +30,7 @@
from app.helpers.data_getter import DataGetter
from app.helpers.data import save_to_db
currencies = DataGetter.get_payment_currencies()
- for i, currency in enumerate(currencies):
+ for i, (currency, has_paypal, has_stripe) in enumerate(currencies):
currency = currency.split(' ')[0]
ticket_fee = TicketFees(currency=currency,
service_fee=ticket_service_fees[i],
| {"golden_diff": "diff --git a/app/settings/__init__.py b/app/settings/__init__.py\n--- a/app/settings/__init__.py\n+++ b/app/settings/__init__.py\n@@ -30,7 +30,7 @@\n from app.helpers.data_getter import DataGetter\n from app.helpers.data import save_to_db\n currencies = DataGetter.get_payment_currencies()\n- for i, currency in enumerate(currencies):\n+ for i, (currency, has_paypal, has_stripe) in enumerate(currencies):\n currency = currency.split(' ')[0]\n ticket_fee = TicketFees(currency=currency,\n service_fee=ticket_service_fees[i],\n", "issue": "Move Payment Gateways to own subtab\nOn `admin/settings/` add a subtab \"Payment Gateways\" and move the Paypal and Stripe here.\n\n\n\n\n\n", "before_files": [{"content": "import stripe\nfrom flask import current_app\nfrom sqlalchemy import desc\nfrom app.models.setting import Setting\nfrom app.models.fees import TicketFees\n\n\ndef get_settings():\n \"\"\"\n Use this to get latest system settings\n \"\"\"\n if 'custom_settings' in current_app.config:\n return current_app.config['custom_settings']\n s = Setting.query.order_by(desc(Setting.id)).first()\n if s is None:\n set_settings(secret='super secret key')\n else:\n current_app.config['custom_settings'] = make_dict(s)\n return current_app.config['custom_settings']\n\n\ndef set_settings(**kwargs):\n \"\"\"\n Update system settings\n \"\"\"\n\n if 'service_fee' in kwargs:\n ticket_service_fees = kwargs.get('service_fee')\n ticket_maximum_fees = kwargs.get('maximum_fee')\n from app.helpers.data_getter import DataGetter\n from app.helpers.data import save_to_db\n currencies = DataGetter.get_payment_currencies()\n for i, currency in enumerate(currencies):\n currency = currency.split(' ')[0]\n ticket_fee = TicketFees(currency=currency,\n service_fee=ticket_service_fees[i],\n maximum_fee=ticket_maximum_fees[i])\n save_to_db(ticket_fee, \"Ticket Fees settings saved\")\n else:\n setting = Setting(**kwargs)\n from app.helpers.data import save_to_db\n save_to_db(setting, 'Setting saved')\n current_app.secret_key = setting.secret\n stripe.api_key = setting.stripe_secret_key\n current_app.config['custom_settings'] = make_dict(setting)\n\n\ndef make_dict(s):\n arguments = {}\n for name, column in s.__mapper__.columns.items():\n if not (column.primary_key or column.unique):\n arguments[name] = getattr(s, name)\n return arguments\n", "path": "app/settings/__init__.py"}]} | 1,215 | 142 |
gh_patches_debug_23885 | rasdani/github-patches | git_diff | kedro-org__kedro-3587 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add official support for Python 3.12
## Description
<!-- Is your feature request related to a problem? A clear and concise description of what the problem is: "I'm always frustrated when ..." -->
Kedro itself probably works on Python 3.12 already, so it would be nice to declare official support.
However, installing Kedro is one thing, but installing the typical dependencies might not be straightforward. For example, I just tested the spaceflights starter and most of the dependencies have already published precompiled wheels for Python 3.12 (at least for M1 Mac), but two of them are still problematic as of today:
- aiohttp https://github.com/aio-libs/aiohttp/issues/7739: it worked after installing the beta version as advised there, so it will be solved soon (edit: fixed ✔️)
- pyzmq https://github.com/zeromq/pyzmq/issues/1907 (M1-specific): it didn't work even after installing the ZMQ header libraries with mamba (edit: fixed ✔️)
## Context
<!-- Why is this change important to you? How would you use it? How can it benefit other users? -->
#2815 was already completed, but officially Kedro does not support Python 3.12 yet.
You can use Kedro on Python 3.12 by manually disabling the warning.
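As a hedged sketch (not official Kedro guidance): the check in `kedro/__init__.py` only installs its `"error"` filter when `sys.warnoptions` is empty, so the warning can be downgraded either from the shell or in code before the first `import kedro`:

```python
import sys

# Option 1 (shell, mirroring the hint in the warning text itself):
#   PYTHONWARNINGS="default:Kedro is not yet fully compatible" kedro run
# Option 2 (in code): any entry in sys.warnoptions makes Kedro skip
# warnings.simplefilter("error", KedroPythonVersionWarning) at import time,
# so on Python 3.12 the warning is merely printed instead of raised.
if not sys.warnoptions:
    sys.warnoptions.append("default")

import kedro  # noqa: E402
```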
## Possible Implementation
<!-- (Optional) Suggest an idea for implementing the addition or change. -->
Wait a bit until at least the spaceflights starter can be safely installed on most mainstream platforms.
## Possible Alternatives
<!-- (Optional) Describe any alternative solutions or features you've considered. -->
Declare Python 3.12 support right away, at the cost of some grief for users who then go on to install certain dependencies.
</issue>
<code>
[start of kedro/__init__.py]
1 """Kedro is a framework that makes it easy to build robust and scalable
2 data pipelines by providing uniform project templates, data abstraction,
3 configuration and pipeline assembly.
4 """
5
6 import sys
7 import warnings
8
9 __version__ = "0.19.3"
10
11
12 class KedroDeprecationWarning(DeprecationWarning):
13 """Custom class for warnings about deprecated Kedro features."""
14
15
16 class KedroPythonVersionWarning(UserWarning):
17 """Custom class for warnings about incompatibilities with Python versions."""
18
19
20 if not sys.warnoptions:
21 warnings.simplefilter("default", KedroDeprecationWarning)
22 warnings.simplefilter("error", KedroPythonVersionWarning)
23
24 if sys.version_info >= (3, 12):
25 warnings.warn(
26 """Kedro is not yet fully compatible with this Python version.
27 To proceed at your own risk and ignore this warning,
28 run Kedro with `python -W "default:Kedro is not yet fully compatible" -m kedro ...`
29 or set the PYTHONWARNINGS environment variable accordingly.""",
30 KedroPythonVersionWarning,
31 )
32
[end of kedro/__init__.py]
[start of kedro/config/abstract_config.py]
1 """This module provides ``kedro.abstract_config`` with the baseline
2 class model for a `ConfigLoader` implementation.
3 """
4 from __future__ import annotations
5
6 from collections import UserDict
7 from typing import Any
8
9
10 class AbstractConfigLoader(UserDict):
11 """``AbstractConfigLoader`` is the abstract base class
12 for all `ConfigLoader` implementations.
13 All user-defined `ConfigLoader` implementations should inherit
14 from `AbstractConfigLoader` and implement all relevant abstract methods.
15 """
16
17 def __init__(
18 self,
19 conf_source: str,
20 env: str | None = None,
21 runtime_params: dict[str, Any] | None = None,
22 **kwargs: Any,
23 ):
24 super().__init__()
25 self.conf_source = conf_source
26 self.env = env
27 self.runtime_params = runtime_params or {}
28
29
30 class BadConfigException(Exception):
31 """Raised when a configuration file cannot be loaded, for instance
32 due to wrong syntax or poor formatting.
33 """
34
35 pass
36
37
38 class MissingConfigException(Exception):
39 """Raised when no configuration files can be found within a config path"""
40
41 pass
42
[end of kedro/config/abstract_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kedro/__init__.py b/kedro/__init__.py
--- a/kedro/__init__.py
+++ b/kedro/__init__.py
@@ -21,7 +21,7 @@
warnings.simplefilter("default", KedroDeprecationWarning)
warnings.simplefilter("error", KedroPythonVersionWarning)
-if sys.version_info >= (3, 12):
+if sys.version_info >= (3, 13):
warnings.warn(
"""Kedro is not yet fully compatible with this Python version.
To proceed at your own risk and ignore this warning,
diff --git a/kedro/config/abstract_config.py b/kedro/config/abstract_config.py
--- a/kedro/config/abstract_config.py
+++ b/kedro/config/abstract_config.py
@@ -26,6 +26,17 @@
self.env = env
self.runtime_params = runtime_params or {}
+ # As of Python 3.12 __getitem__ is no longer called in the inherited UserDict.get()
+ # This causes AbstractConfigLoader.get() to break
+ # See: https://github.com/python/cpython/issues/105524
+ # Overwrite the inherited get function with the implementation from 3.11 and prior
+ def get(self, key: str, default: Any = None) -> Any:
+ "D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
class BadConfigException(Exception):
"""Raised when a configuration file cannot be loaded, for instance
| {"golden_diff": "diff --git a/kedro/__init__.py b/kedro/__init__.py\n--- a/kedro/__init__.py\n+++ b/kedro/__init__.py\n@@ -21,7 +21,7 @@\n warnings.simplefilter(\"default\", KedroDeprecationWarning)\n warnings.simplefilter(\"error\", KedroPythonVersionWarning)\n \n-if sys.version_info >= (3, 12):\n+if sys.version_info >= (3, 13):\n warnings.warn(\n \"\"\"Kedro is not yet fully compatible with this Python version.\n To proceed at your own risk and ignore this warning,\ndiff --git a/kedro/config/abstract_config.py b/kedro/config/abstract_config.py\n--- a/kedro/config/abstract_config.py\n+++ b/kedro/config/abstract_config.py\n@@ -26,6 +26,17 @@\n self.env = env\n self.runtime_params = runtime_params or {}\n \n+ # As of Python 3.12 __getitem__ is no longer called in the inherited UserDict.get()\n+ # This causes AbstractConfigLoader.get() to break\n+ # See: https://github.com/python/cpython/issues/105524\n+ # Overwrite the inherited get function with the implementation from 3.11 and prior\n+ def get(self, key: str, default: Any = None) -> Any:\n+ \"D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.\"\n+ try:\n+ return self[key]\n+ except KeyError:\n+ return default\n+\n \n class BadConfigException(Exception):\n \"\"\"Raised when a configuration file cannot be loaded, for instance\n", "issue": "Add official support for Python 3.12\n## Description\r\n<!-- Is your feature request related to a problem? A clear and concise description of what the problem is: \"I'm always frustrated when ...\" -->\r\nKedro itself probably works on Python 3.12 already, it would be nice to declare official support.\r\n\r\nHowever, installing Kedro is one thing, but installing the typical dependencies might not be straightforward. For example, I just tested the spaceflights starter and most of the dependencies have already published precompiled wheels for Python 3.12 (at least for M1 Mac), but two of them are still problematic as of today:\r\n\r\n- aiohttp https://github.com/aio-libs/aiohttp/issues/7739 worked by installing the beta version as advised there, so it will be solved soon (edit: fixed \u2714\ufe0f)\r\n- pyzmq https://github.com/zeromq/pyzmq/issues/1907 (M1 specific), didn't work after installing the ZMQ header libraries with mamba (edit: fixed \u2714\ufe0f)\r\n\r\n## Context\r\n<!-- Why is this change important to you? How would you use it? How can it benefit other users? -->\r\n#2815 was already completed, but officially Kedro does not support Python 3.12 yet.\r\n\r\nYou can use Kedro on Python 3.12 by manually disabling the warning.\r\n\r\n## Possible Implementation\r\n<!-- (Optional) Suggest an idea for implementing the addition or change. -->\r\nWait a bit until at least the spaceflights starter can be safely installed in most mainstream platforms.\r\n\r\n## Possible Alternatives\r\n<!-- (Optional) Describe any alternative solutions or features you've considered. 
-->\r\nDeclare Python 3.12 support already, at the cost of creating some grievance of users that then proceed to install some dependencies.\r\n\n", "before_files": [{"content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\nimport sys\nimport warnings\n\n__version__ = \"0.19.3\"\n\n\nclass KedroDeprecationWarning(DeprecationWarning):\n \"\"\"Custom class for warnings about deprecated Kedro features.\"\"\"\n\n\nclass KedroPythonVersionWarning(UserWarning):\n \"\"\"Custom class for warnings about incompatibilities with Python versions.\"\"\"\n\n\nif not sys.warnoptions:\n warnings.simplefilter(\"default\", KedroDeprecationWarning)\n warnings.simplefilter(\"error\", KedroPythonVersionWarning)\n\nif sys.version_info >= (3, 12):\n warnings.warn(\n \"\"\"Kedro is not yet fully compatible with this Python version.\nTo proceed at your own risk and ignore this warning,\nrun Kedro with `python -W \"default:Kedro is not yet fully compatible\" -m kedro ...`\nor set the PYTHONWARNINGS environment variable accordingly.\"\"\",\n KedroPythonVersionWarning,\n )\n", "path": "kedro/__init__.py"}, {"content": "\"\"\"This module provides ``kedro.abstract_config`` with the baseline\nclass model for a `ConfigLoader` implementation.\n\"\"\"\nfrom __future__ import annotations\n\nfrom collections import UserDict\nfrom typing import Any\n\n\nclass AbstractConfigLoader(UserDict):\n \"\"\"``AbstractConfigLoader`` is the abstract base class\n for all `ConfigLoader` implementations.\n All user-defined `ConfigLoader` implementations should inherit\n from `AbstractConfigLoader` and implement all relevant abstract methods.\n \"\"\"\n\n def __init__(\n self,\n conf_source: str,\n env: str | None = None,\n runtime_params: dict[str, Any] | None = None,\n **kwargs: Any,\n ):\n super().__init__()\n self.conf_source = conf_source\n self.env = env\n self.runtime_params = runtime_params or {}\n\n\nclass BadConfigException(Exception):\n \"\"\"Raised when a configuration file cannot be loaded, for instance\n due to wrong syntax or poor formatting.\n \"\"\"\n\n pass\n\n\nclass MissingConfigException(Exception):\n \"\"\"Raised when no configuration files can be found within a config path\"\"\"\n\n pass\n", "path": "kedro/config/abstract_config.py"}]} | 1,551 | 379 |
gh_patches_debug_17700 | rasdani/github-patches | git_diff | DDMAL__CantusDB-210 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Chant-edit page doesn't load for Admin user
The chant-edit page (e.g., http://127.0.0.1:3122/edit-volpiano/702611?pk=705019) takes forever to load for the Admin user.
I was logged in with my Admin account (i.e., superuser). Ideally, this should give me the power to access and change anything.
I also checked with my project manager account, and the page loaded fine.
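For anyone triaging this, a hedged diagnostic sketch for `python manage.py shell` (the email address is a placeholder, and the group name is an assumption about what the views key off):

```python
# Superusers bypass Django permission checks, but view logic that filters on
# group membership (e.g. a "project manager" group) can still treat an admin
# account differently if it was never added to that group.
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group

User = get_user_model()
admin = User.objects.get(email="admin@example.com")  # placeholder account
print(admin.is_superuser, list(admin.groups.values_list("name", flat=True)))

# One possible remedy, assuming the page behaviour depends on this group:
pm, _ = Group.objects.get_or_create(name="project manager")
pm.user_set.add(admin)
```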
</issue>
<code>
[start of django/cantusdb_project/users/managers.py]
1 # https://testdriven.io/blog/django-custom-user-model/#:~:text=The%20default%20User%20model%20in,either%20subclassing%20AbstractUser%20or%20AbstractBaseUser%20.
2
3 from django.contrib.auth.base_user import BaseUserManager
4 from django.utils.translation import gettext_lazy as _
5
6
7 class CustomUserManager(BaseUserManager):
8 """
9 Custom user model manager where email is the unique identifiers
10 for authentication instead of usernames.
11 """
12 def create_user(self, email, password, **extra_fields):
13 """
14 Create and save a User with the given email and password.
15 """
16 if not email:
17 raise ValueError(_('The Email must be set'))
18 email = self.normalize_email(email)
19 user = self.model(email=email, **extra_fields)
20 user.set_password(password)
21 user.save()
22 return user
23
24 def create_superuser(self, email, password, **extra_fields):
25 """
26 Create and save a SuperUser with the given email and password.
27 """
28 extra_fields.setdefault('is_staff', True)
29 extra_fields.setdefault('is_superuser', True)
30 extra_fields.setdefault('is_active', True)
31
32 if extra_fields.get('is_staff') is not True:
33 raise ValueError(_('Superuser must have is_staff=True.'))
34 if extra_fields.get('is_superuser') is not True:
35 raise ValueError(_('Superuser must have is_superuser=True.'))
36 return self.create_user(email, password, **extra_fields)
[end of django/cantusdb_project/users/managers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/users/managers.py b/django/cantusdb_project/users/managers.py
--- a/django/cantusdb_project/users/managers.py
+++ b/django/cantusdb_project/users/managers.py
@@ -2,7 +2,7 @@
from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import gettext_lazy as _
-
+from django.contrib.auth.models import Group
class CustomUserManager(BaseUserManager):
"""
@@ -33,4 +33,7 @@
raise ValueError(_('Superuser must have is_staff=True.'))
if extra_fields.get('is_superuser') is not True:
raise ValueError(_('Superuser must have is_superuser=True.'))
- return self.create_user(email, password, **extra_fields)
\ No newline at end of file
+ user = self.create_user(email, password, **extra_fields)
+ pm = Group.objects.get(name='project manager')
+ pm.user_set.add(user)
+ return user
\ No newline at end of file
| {"golden_diff": "diff --git a/django/cantusdb_project/users/managers.py b/django/cantusdb_project/users/managers.py\n--- a/django/cantusdb_project/users/managers.py\n+++ b/django/cantusdb_project/users/managers.py\n@@ -2,7 +2,7 @@\n \n from django.contrib.auth.base_user import BaseUserManager\n from django.utils.translation import gettext_lazy as _\n-\n+from django.contrib.auth.models import Group\n \n class CustomUserManager(BaseUserManager):\n \"\"\"\n@@ -33,4 +33,7 @@\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n- return self.create_user(email, password, **extra_fields)\n\\ No newline at end of file\n+ user = self.create_user(email, password, **extra_fields)\n+ pm = Group.objects.get(name='project manager') \n+ pm.user_set.add(user)\n+ return user\n\\ No newline at end of file\n", "issue": "Chant-edit page doesn't load for Admin user\nThe chant-edit page (e.g., http://127.0.0.1:3122/edit-volpiano/702611?pk=705019) takes forever to load for Admin user. \r\nI was logged in with my Admin account (i.e., superuser). Ideally, this should give me power to access and change anything. \r\n\r\nI also check with my project manager account and it loaded fine.\n", "before_files": [{"content": "# https://testdriven.io/blog/django-custom-user-model/#:~:text=The%20default%20User%20model%20in,either%20subclassing%20AbstractUser%20or%20AbstractBaseUser%20.\n\nfrom django.contrib.auth.base_user import BaseUserManager\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass CustomUserManager(BaseUserManager):\n \"\"\"\n Custom user model manager where email is the unique identifiers\n for authentication instead of usernames.\n \"\"\"\n def create_user(self, email, password, **extra_fields):\n \"\"\"\n Create and save a User with the given email and password.\n \"\"\"\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, email, password, **extra_fields):\n \"\"\"\n Create and save a SuperUser with the given email and password.\n \"\"\"\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(_('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(_('Superuser must have is_superuser=True.'))\n return self.create_user(email, password, **extra_fields)", "path": "django/cantusdb_project/users/managers.py"}]} | 1,041 | 230 |
gh_patches_debug_41565 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2259 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sticky cookies are improperly formatted.
##### Steps to reproduce the problem:
1. Go to http://www.html-kit.com/tools/cookietester/
2. Click 'Set Test Cookie'
3. Observe that one cookie is sent to the server.
4. Remove the cookie.
5. Launch mitmproxy with `mitmproxy -t html-kit\.com` and tell your browser to use it as a proxy.
6. Reload the page.
7. Click 'Set Test Cookie'
8. Observe that two 'cookies' are sent to the server.
##### Any other comments? What have you tried so far?
There appears to be a comma in the output of mitmproxy, even though it is surrounded by quotes. It's possible, then, that this is a parsing failure on the tool's end, caused by a difference in the format of the date that is sent back. Still, should mitmproxy really be changing that?
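A minimal illustration in plain Python (not mitmproxy internals) of how a comma ends up in the replayed header when full `Set-Cookie` attribute strings are joined instead of bare name=value pairs:

```python
# The RFC 1123 expiry date contains a comma, so a comma-splitting cookie
# parser sees "two" cookies in the joined header.
stored = ["test=1; expires=Wed, 01 Jan 2020 00:00:00 GMT; Path=/"]
print("Cookie: " + "; ".join(stored))
# -> Cookie: test=1; expires=Wed, 01 Jan 2020 00:00:00 GMT; Path=/

# A Cookie request header should carry only name=value pairs:
pairs = [("test", "1")]
print("Cookie: " + "; ".join("%s=%s" % (k, v) for k, v in pairs))
# -> Cookie: test=1
```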
##### System information
Arch Linux, freshly updated.
Mitmproxy version: 2.0.1 (release version)
Python version: 3.6.0
Platform: Linux-4.10.6-1-ARCH-x86_64-with-glibc2.3.4
SSL version: OpenSSL 1.0.2k 26 Jan 2017
</issue>
<code>
[start of mitmproxy/addons/stickycookie.py]
1 import collections
2 from http import cookiejar
3
4 from mitmproxy.net.http import cookies
5
6 from mitmproxy import exceptions
7 from mitmproxy import flowfilter
8 from mitmproxy import ctx
9
10
11 def ckey(attrs, f):
12 """
13 Returns a (domain, port, path) tuple.
14 """
15 domain = f.request.host
16 path = "/"
17 if "domain" in attrs:
18 domain = attrs["domain"]
19 if "path" in attrs:
20 path = attrs["path"]
21 return (domain, f.request.port, path)
22
23
24 def domain_match(a, b):
25 if cookiejar.domain_match(a, b):
26 return True
27 elif cookiejar.domain_match(a, b.strip(".")):
28 return True
29 return False
30
31
32 class StickyCookie:
33 def __init__(self):
34 self.jar = collections.defaultdict(dict)
35 self.flt = None
36
37 def configure(self, updated):
38 if "stickycookie" in updated:
39 if ctx.options.stickycookie:
40 flt = flowfilter.parse(ctx.options.stickycookie)
41 if not flt:
42 raise exceptions.OptionsError(
43 "stickycookie: invalid filter expression: %s" % ctx.options.stickycookie
44 )
45 self.flt = flt
46 else:
47 self.flt = None
48
49 def response(self, flow):
50 if self.flt:
51 for name, (value, attrs) in flow.response.cookies.items(multi=True):
52 # FIXME: We now know that Cookie.py screws up some cookies with
53 # valid RFC 822/1123 datetime specifications for expiry. Sigh.
54 dom_port_path = ckey(attrs, flow)
55
56 if domain_match(flow.request.host, dom_port_path[0]):
57 if cookies.is_expired(attrs):
58 # Remove the cookie from jar
59 self.jar[dom_port_path].pop(name, None)
60
61 # If all cookies of a dom_port_path have been removed
62 # then remove it from the jar itself
63 if not self.jar[dom_port_path]:
64 self.jar.pop(dom_port_path, None)
65 else:
66 b = attrs.copy()
67 b.insert(0, name, value)
68 self.jar[dom_port_path][name] = b
69
70 def request(self, flow):
71 if self.flt:
72 l = []
73 if flowfilter.match(self.flt, flow):
74 for domain, port, path in self.jar.keys():
75 match = [
76 domain_match(flow.request.host, domain),
77 flow.request.port == port,
78 flow.request.path.startswith(path)
79 ]
80 if all(match):
81 c = self.jar[(domain, port, path)]
82 l.extend([cookies.format_cookie_header(c[name].items(multi=True)) for name in c.keys()])
83 if l:
84 # FIXME: we need to formalise this...
85 flow.request.stickycookie = True
86 flow.request.headers["cookie"] = "; ".join(l)
87
[end of mitmproxy/addons/stickycookie.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/addons/stickycookie.py b/mitmproxy/addons/stickycookie.py
--- a/mitmproxy/addons/stickycookie.py
+++ b/mitmproxy/addons/stickycookie.py
@@ -1,14 +1,14 @@
import collections
from http import cookiejar
+from typing import List, Tuple, Dict, Optional # noqa
+from mitmproxy import http, flowfilter, ctx, exceptions
from mitmproxy.net.http import cookies
-from mitmproxy import exceptions
-from mitmproxy import flowfilter
-from mitmproxy import ctx
+TOrigin = Tuple[str, int, str]
-def ckey(attrs, f):
+def ckey(attrs: Dict[str, str], f: http.HTTPFlow) -> TOrigin:
"""
Returns a (domain, port, path) tuple.
"""
@@ -21,18 +21,18 @@
return (domain, f.request.port, path)
-def domain_match(a, b):
- if cookiejar.domain_match(a, b):
+def domain_match(a: str, b: str) -> bool:
+ if cookiejar.domain_match(a, b): # type: ignore
return True
- elif cookiejar.domain_match(a, b.strip(".")):
+ elif cookiejar.domain_match(a, b.strip(".")): # type: ignore
return True
return False
class StickyCookie:
def __init__(self):
- self.jar = collections.defaultdict(dict)
- self.flt = None
+ self.jar = collections.defaultdict(dict) # type: Dict[TOrigin, Dict[str, str]]
+ self.flt = None # type: Optional[flowfilter.TFilter]
def configure(self, updated):
if "stickycookie" in updated:
@@ -46,7 +46,7 @@
else:
self.flt = None
- def response(self, flow):
+ def response(self, flow: http.HTTPFlow):
if self.flt:
for name, (value, attrs) in flow.response.cookies.items(multi=True):
# FIXME: We now know that Cookie.py screws up some cookies with
@@ -63,24 +63,21 @@
if not self.jar[dom_port_path]:
self.jar.pop(dom_port_path, None)
else:
- b = attrs.copy()
- b.insert(0, name, value)
- self.jar[dom_port_path][name] = b
+ self.jar[dom_port_path][name] = value
- def request(self, flow):
+ def request(self, flow: http.HTTPFlow):
if self.flt:
- l = []
+ cookie_list = [] # type: List[Tuple[str,str]]
if flowfilter.match(self.flt, flow):
- for domain, port, path in self.jar.keys():
+ for (domain, port, path), c in self.jar.items():
match = [
domain_match(flow.request.host, domain),
flow.request.port == port,
flow.request.path.startswith(path)
]
if all(match):
- c = self.jar[(domain, port, path)]
- l.extend([cookies.format_cookie_header(c[name].items(multi=True)) for name in c.keys()])
- if l:
+ cookie_list.extend(c.items())
+ if cookie_list:
# FIXME: we need to formalise this...
- flow.request.stickycookie = True
- flow.request.headers["cookie"] = "; ".join(l)
+ flow.metadata["stickycookie"] = True
+ flow.request.headers["cookie"] = cookies.format_cookie_header(cookie_list)
| {"golden_diff": "diff --git a/mitmproxy/addons/stickycookie.py b/mitmproxy/addons/stickycookie.py\n--- a/mitmproxy/addons/stickycookie.py\n+++ b/mitmproxy/addons/stickycookie.py\n@@ -1,14 +1,14 @@\n import collections\n from http import cookiejar\n+from typing import List, Tuple, Dict, Optional # noqa\n \n+from mitmproxy import http, flowfilter, ctx, exceptions\n from mitmproxy.net.http import cookies\n \n-from mitmproxy import exceptions\n-from mitmproxy import flowfilter\n-from mitmproxy import ctx\n+TOrigin = Tuple[str, int, str]\n \n \n-def ckey(attrs, f):\n+def ckey(attrs: Dict[str, str], f: http.HTTPFlow) -> TOrigin:\n \"\"\"\n Returns a (domain, port, path) tuple.\n \"\"\"\n@@ -21,18 +21,18 @@\n return (domain, f.request.port, path)\n \n \n-def domain_match(a, b):\n- if cookiejar.domain_match(a, b):\n+def domain_match(a: str, b: str) -> bool:\n+ if cookiejar.domain_match(a, b): # type: ignore\n return True\n- elif cookiejar.domain_match(a, b.strip(\".\")):\n+ elif cookiejar.domain_match(a, b.strip(\".\")): # type: ignore\n return True\n return False\n \n \n class StickyCookie:\n def __init__(self):\n- self.jar = collections.defaultdict(dict)\n- self.flt = None\n+ self.jar = collections.defaultdict(dict) # type: Dict[TOrigin, Dict[str, str]]\n+ self.flt = None # type: Optional[flowfilter.TFilter]\n \n def configure(self, updated):\n if \"stickycookie\" in updated:\n@@ -46,7 +46,7 @@\n else:\n self.flt = None\n \n- def response(self, flow):\n+ def response(self, flow: http.HTTPFlow):\n if self.flt:\n for name, (value, attrs) in flow.response.cookies.items(multi=True):\n # FIXME: We now know that Cookie.py screws up some cookies with\n@@ -63,24 +63,21 @@\n if not self.jar[dom_port_path]:\n self.jar.pop(dom_port_path, None)\n else:\n- b = attrs.copy()\n- b.insert(0, name, value)\n- self.jar[dom_port_path][name] = b\n+ self.jar[dom_port_path][name] = value\n \n- def request(self, flow):\n+ def request(self, flow: http.HTTPFlow):\n if self.flt:\n- l = []\n+ cookie_list = [] # type: List[Tuple[str,str]]\n if flowfilter.match(self.flt, flow):\n- for domain, port, path in self.jar.keys():\n+ for (domain, port, path), c in self.jar.items():\n match = [\n domain_match(flow.request.host, domain),\n flow.request.port == port,\n flow.request.path.startswith(path)\n ]\n if all(match):\n- c = self.jar[(domain, port, path)]\n- l.extend([cookies.format_cookie_header(c[name].items(multi=True)) for name in c.keys()])\n- if l:\n+ cookie_list.extend(c.items())\n+ if cookie_list:\n # FIXME: we need to formalise this...\n- flow.request.stickycookie = True\n- flow.request.headers[\"cookie\"] = \"; \".join(l)\n+ flow.metadata[\"stickycookie\"] = True\n+ flow.request.headers[\"cookie\"] = cookies.format_cookie_header(cookie_list)\n", "issue": "Sticky cookies are improperly formatted.\n##### Steps to reproduce the problem:\r\n\r\n1. Go to http://www.html-kit.com/tools/cookietester/\r\n2. Click 'Set Test Cookie'\r\n3. Observe that one cookie is sent to the server.\r\n4. Remove the cookie.\r\n5. launch mitmproxy with `mitmproxy -t html-kit\\.com` and tell your browser to use it as a proxy\r\n6. Reload the page.\r\n7. Click 'Set Test Cookie'\r\n8. Observe that two 'cookies' are sent to the server.\r\n\r\n##### Any other comments? What have you tried so far?\r\nThere appears to be a comma in the output of mitmproxy, even though it is surrounded by quotes. It's possible, then that this is a parsing fail on the tool's end caused by a difference in what's sent back for the format of the date. 
Still, should it really be changing that?\r\n\r\n##### System information\r\nArch Linux, freshly updated.\r\n\r\nMitmproxy version: 2.0.1 (release version) \r\nPython version: 3.6.0\r\nPlatform: Linux-4.10.6-1-ARCH-x86_64-with-glibc2.3.4\r\nSSL version: OpenSSL 1.0.2k 26 Jan 2017\r\n\n", "before_files": [{"content": "import collections\nfrom http import cookiejar\n\nfrom mitmproxy.net.http import cookies\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import ctx\n\n\ndef ckey(attrs, f):\n \"\"\"\n Returns a (domain, port, path) tuple.\n \"\"\"\n domain = f.request.host\n path = \"/\"\n if \"domain\" in attrs:\n domain = attrs[\"domain\"]\n if \"path\" in attrs:\n path = attrs[\"path\"]\n return (domain, f.request.port, path)\n\n\ndef domain_match(a, b):\n if cookiejar.domain_match(a, b):\n return True\n elif cookiejar.domain_match(a, b.strip(\".\")):\n return True\n return False\n\n\nclass StickyCookie:\n def __init__(self):\n self.jar = collections.defaultdict(dict)\n self.flt = None\n\n def configure(self, updated):\n if \"stickycookie\" in updated:\n if ctx.options.stickycookie:\n flt = flowfilter.parse(ctx.options.stickycookie)\n if not flt:\n raise exceptions.OptionsError(\n \"stickycookie: invalid filter expression: %s\" % ctx.options.stickycookie\n )\n self.flt = flt\n else:\n self.flt = None\n\n def response(self, flow):\n if self.flt:\n for name, (value, attrs) in flow.response.cookies.items(multi=True):\n # FIXME: We now know that Cookie.py screws up some cookies with\n # valid RFC 822/1123 datetime specifications for expiry. Sigh.\n dom_port_path = ckey(attrs, flow)\n\n if domain_match(flow.request.host, dom_port_path[0]):\n if cookies.is_expired(attrs):\n # Remove the cookie from jar\n self.jar[dom_port_path].pop(name, None)\n\n # If all cookies of a dom_port_path have been removed\n # then remove it from the jar itself\n if not self.jar[dom_port_path]:\n self.jar.pop(dom_port_path, None)\n else:\n b = attrs.copy()\n b.insert(0, name, value)\n self.jar[dom_port_path][name] = b\n\n def request(self, flow):\n if self.flt:\n l = []\n if flowfilter.match(self.flt, flow):\n for domain, port, path in self.jar.keys():\n match = [\n domain_match(flow.request.host, domain),\n flow.request.port == port,\n flow.request.path.startswith(path)\n ]\n if all(match):\n c = self.jar[(domain, port, path)]\n l.extend([cookies.format_cookie_header(c[name].items(multi=True)) for name in c.keys()])\n if l:\n # FIXME: we need to formalise this...\n flow.request.stickycookie = True\n flow.request.headers[\"cookie\"] = \"; \".join(l)\n", "path": "mitmproxy/addons/stickycookie.py"}]} | 1,614 | 798 |
gh_patches_debug_19676 | rasdani/github-patches | git_diff | holoviz__holoviews-1845 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Table broken with bokeh 0.12.7
When displaying a Table with bokeh 0.12.7, I currently see the following error:
```
Javascript error adding output!
Error: SlickGrid's 'enableColumnReorder = true' option requires jquery-ui.sortable module to be loaded
See your browser Javascript console for more details.
```
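For context, a hedged Bokeh-level sketch of one workaround, assuming a Bokeh release in which `DataTable` exposes a `reorderable` property (the sample data is made up):

```python
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn

source = ColumnDataSource(data=dict(x=[1, 2], y=["a", "b"]))
columns = [TableColumn(field="x", title="x"), TableColumn(field="y", title="y")]
# Column reordering is what pulls in SlickGrid's jquery-ui.sortable
# dependency; disabling it avoids the client-side error above.
table = DataTable(source=source, columns=columns, reorderable=False)
```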
</issue>
<code>
[start of holoviews/plotting/bokeh/tabular.py]
1 from bokeh.models.widgets import DataTable, TableColumn
2
3 import param
4
5 import numpy as np
6 from ...core import Dataset
7 from ...element import ItemTable
8 from ..plot import GenericElementPlot
9 from .plot import BokehPlot
10
11 class TablePlot(BokehPlot, GenericElementPlot):
12
13 height = param.Number(default=None)
14
15 width = param.Number(default=400)
16
17 style_opts = ['row_headers', 'selectable', 'editable',
18 'sortable', 'fit_columns', 'width', 'height']
19
20 finalize_hooks = param.HookList(default=[], doc="""
21 Optional list of hooks called when finalizing a column.
22 The hook is passed the plot object and the displayed
23 object, and other plotting handles can be accessed via plot.handles.""")
24
25 _update_handles = ['source', 'glyph']
26
27 def __init__(self, element, plot=None, **params):
28 super(TablePlot, self).__init__(element, **params)
29 self.handles = {} if plot is None else self.handles['plot']
30 element_ids = self.hmap.traverse(lambda x: id(x), [Dataset, ItemTable])
31 self.static = len(set(element_ids)) == 1 and len(self.keys) == len(self.hmap)
32 self.callbacks = [] # Callback support on tables not implemented
33
34
35 def _execute_hooks(self, element):
36 """
37 Executes finalize hooks
38 """
39 for hook in self.finalize_hooks:
40 try:
41 hook(self, element)
42 except Exception as e:
43 self.warning("Plotting hook %r could not be applied:\n\n %s" % (hook, e))
44
45
46 def get_data(self, element, ranges=None, empty=False):
47 dims = element.dimensions()
48 data = {d: np.array([]) if empty else element.dimension_values(d)
49 for d in dims}
50 mapping = {d.name: d.name for d in dims}
51 data = {d.name: values if values.dtype.kind in "if" else list(map(d.pprint_value, values))
52 for d, values in data.items()}
53 return data, mapping
54
55
56 def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
57 """
58 Initializes a new plot object with the last available frame.
59 """
60 # Get element key and ranges for frame
61 element = self.hmap.last
62 key = self.keys[-1]
63 self.current_frame = element
64 self.current_key = key
65
66 data, _ = self.get_data(element, ranges)
67 if source is None:
68 source = self._init_datasource(data)
69 self.handles['source'] = source
70
71 dims = element.dimensions()
72 columns = [TableColumn(field=d.name, title=d.pprint_label) for d in dims]
73 properties = self.lookup_options(element, 'style')[self.cyclic_index]
74 table = DataTable(source=source, columns=columns, height=self.height,
75 width=self.width, **properties)
76 self.handles['plot'] = table
77 self.handles['glyph_renderer'] = table
78 self._execute_hooks(element)
79 self.drawn = True
80
81 return table
82
83
84 @property
85 def current_handles(self):
86 """
87 Returns a list of the plot objects to update.
88 """
89 handles = []
90 if self.static and not self.dynamic:
91 return handles
92
93
94 element = self.current_frame
95 previous_id = self.handles.get('previous_id', None)
96 current_id = None if self.current_frame is None else element._plot_id
97 for handle in self._update_handles:
98 if (handle == 'source' and self.dynamic and current_id == previous_id):
99 continue
100 if handle in self.handles:
101 handles.append(self.handles[handle])
102
103 # Cache frame object id to skip updating if unchanged
104 if self.dynamic:
105 self.handles['previous_id'] = current_id
106
107 return handles
108
109
110 def update_frame(self, key, ranges=None, plot=None):
111 """
112 Updates an existing plot with data corresponding
113 to the key.
114 """
115 element = self._get_frame(key)
116 source = self.handles['source']
117 data, _ = self.get_data(element, ranges)
118 self._update_datasource(source, data)
119
[end of holoviews/plotting/bokeh/tabular.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/holoviews/plotting/bokeh/tabular.py b/holoviews/plotting/bokeh/tabular.py
--- a/holoviews/plotting/bokeh/tabular.py
+++ b/holoviews/plotting/bokeh/tabular.py
@@ -7,6 +7,8 @@
from ...element import ItemTable
from ..plot import GenericElementPlot
from .plot import BokehPlot
+from .util import bokeh_version
+
class TablePlot(BokehPlot, GenericElementPlot):
@@ -71,6 +73,8 @@
dims = element.dimensions()
columns = [TableColumn(field=d.name, title=d.pprint_label) for d in dims]
properties = self.lookup_options(element, 'style')[self.cyclic_index]
+ if bokeh_version > '0.12.7':
+ properties['reorderable'] = False
table = DataTable(source=source, columns=columns, height=self.height,
width=self.width, **properties)
self.handles['plot'] = table
| {"golden_diff": "diff --git a/holoviews/plotting/bokeh/tabular.py b/holoviews/plotting/bokeh/tabular.py\n--- a/holoviews/plotting/bokeh/tabular.py\n+++ b/holoviews/plotting/bokeh/tabular.py\n@@ -7,6 +7,8 @@\n from ...element import ItemTable\n from ..plot import GenericElementPlot\n from .plot import BokehPlot\n+from .util import bokeh_version\n+\n \n class TablePlot(BokehPlot, GenericElementPlot):\n \n@@ -71,6 +73,8 @@\n dims = element.dimensions()\n columns = [TableColumn(field=d.name, title=d.pprint_label) for d in dims]\n properties = self.lookup_options(element, 'style')[self.cyclic_index]\n+ if bokeh_version > '0.12.7':\n+ properties['reorderable'] = False\n table = DataTable(source=source, columns=columns, height=self.height,\n width=self.width, **properties)\n self.handles['plot'] = table\n", "issue": "Table broken with bokeh 0.12.7\nWhen displaying a Table with bokeh 0.12.7 I currently see the following error:\r\n\r\n```\r\nJavascript error adding output!\r\nError: SlickGrid's 'enableColumnReorder = true' option requires jquery-ui.sortable module to be loaded\r\nSee your browser Javascript console for more details.\r\n```\n", "before_files": [{"content": "from bokeh.models.widgets import DataTable, TableColumn\n\nimport param\n\nimport numpy as np\nfrom ...core import Dataset\nfrom ...element import ItemTable\nfrom ..plot import GenericElementPlot\nfrom .plot import BokehPlot\n\nclass TablePlot(BokehPlot, GenericElementPlot):\n\n height = param.Number(default=None)\n\n width = param.Number(default=400)\n\n style_opts = ['row_headers', 'selectable', 'editable',\n 'sortable', 'fit_columns', 'width', 'height']\n\n finalize_hooks = param.HookList(default=[], doc=\"\"\"\n Optional list of hooks called when finalizing a column.\n The hook is passed the plot object and the displayed\n object, and other plotting handles can be accessed via plot.handles.\"\"\")\n\n _update_handles = ['source', 'glyph']\n\n def __init__(self, element, plot=None, **params):\n super(TablePlot, self).__init__(element, **params)\n self.handles = {} if plot is None else self.handles['plot']\n element_ids = self.hmap.traverse(lambda x: id(x), [Dataset, ItemTable])\n self.static = len(set(element_ids)) == 1 and len(self.keys) == len(self.hmap)\n self.callbacks = [] # Callback support on tables not implemented\n\n\n def _execute_hooks(self, element):\n \"\"\"\n Executes finalize hooks\n \"\"\"\n for hook in self.finalize_hooks:\n try:\n hook(self, element)\n except Exception as e:\n self.warning(\"Plotting hook %r could not be applied:\\n\\n %s\" % (hook, e))\n\n\n def get_data(self, element, ranges=None, empty=False):\n dims = element.dimensions()\n data = {d: np.array([]) if empty else element.dimension_values(d)\n for d in dims}\n mapping = {d.name: d.name for d in dims}\n data = {d.name: values if values.dtype.kind in \"if\" else list(map(d.pprint_value, values))\n for d, values in data.items()}\n return data, mapping\n\n\n def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):\n \"\"\"\n Initializes a new plot object with the last available frame.\n \"\"\"\n # Get element key and ranges for frame\n element = self.hmap.last\n key = self.keys[-1]\n self.current_frame = element\n self.current_key = key\n\n data, _ = self.get_data(element, ranges)\n if source is None:\n source = self._init_datasource(data)\n self.handles['source'] = source\n\n dims = element.dimensions()\n columns = [TableColumn(field=d.name, title=d.pprint_label) for d in dims]\n properties = self.lookup_options(element, 
'style')[self.cyclic_index]\n table = DataTable(source=source, columns=columns, height=self.height,\n width=self.width, **properties)\n self.handles['plot'] = table\n self.handles['glyph_renderer'] = table\n self._execute_hooks(element)\n self.drawn = True\n\n return table\n\n\n @property\n def current_handles(self):\n \"\"\"\n Returns a list of the plot objects to update.\n \"\"\"\n handles = []\n if self.static and not self.dynamic:\n return handles\n\n\n element = self.current_frame\n previous_id = self.handles.get('previous_id', None)\n current_id = None if self.current_frame is None else element._plot_id\n for handle in self._update_handles:\n if (handle == 'source' and self.dynamic and current_id == previous_id):\n continue\n if handle in self.handles:\n handles.append(self.handles[handle])\n\n # Cache frame object id to skip updating if unchanged\n if self.dynamic:\n self.handles['previous_id'] = current_id\n\n return handles\n\n\n def update_frame(self, key, ranges=None, plot=None):\n \"\"\"\n Updates an existing plot with data corresponding\n to the key.\n \"\"\"\n element = self._get_frame(key)\n source = self.handles['source']\n data, _ = self.get_data(element, ranges)\n self._update_datasource(source, data)\n", "path": "holoviews/plotting/bokeh/tabular.py"}]} | 1,794 | 241 |
gh_patches_debug_20905 | rasdani/github-patches | git_diff | nvaccess__nvda-11972 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dev docs: globalVars.appDir is not defined when attempting to build docs with Sphinx
Hi,
Related to #11970 and actually blocks it:
### Steps to reproduce:
When trying to build dev docs using "scons devDocs":
1. Run scons devDocs.
2. Once Sphinx is installed and ready, it will try to build the dev docs for the source code.
### Actual behavior:
A traceback shows up, ending with:
`AttributeError: module 'globalVars' has no attribute 'appDir'`
### Expected behavior:
The dev docs build completes with no errors.
### System configuration
#### NVDA installed/portable/running from source:
Source
#### NVDA version:
Alpha-21561,7e5ffde2391c
#### Windows version:
Windows 10 Version 20H2 (build 19042.685)
#### Name and version of other software in use when reproducing the issue:
Python 3.7.9
#### Other information about your system:
N/A
### Other questions
#### Does the issue still occur after restarting your computer?
Yes
#### Have you tried any other versions of NVDA? If so, please report their behaviors.
Not applicable
#### If addons are disabled, is your problem still occurring?
Not applicable
#### Did you try to run the COM registry fixing tool in NVDA menu / tools?
Not applicable
### Cause:
This is caused by a config file error, specifically when a mock config.conf instance is created. Prior to this, importing the config module fails because globalVars.appDir is not defined by the time scons devDocs is run.
### Solution:
One solution is to define globalVars.appDir to point to the source directory.
Thanks.
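A minimal sketch of that suggestion for `devDocs/conf.py`, assuming the Sphinx build runs from the `devDocs/` directory so the relative path resolves to the source tree:

```python
import os
import globalVars  # noqa: E402

# NVDA is not running during a docs build, so appDir is otherwise undefined;
# point it at the source tree before any module that reads it is imported.
globalVars.appDir = os.path.abspath(os.path.join("..", "source"))
```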
</issue>
<code>
[start of devDocs/conf.py]
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2019 NV Access Limited, Leonard de Ruijter
3 # This file is covered by the GNU General Public License.
4 # See the file COPYING for more details.
5
6 # Configuration file for the Sphinx documentation builder.
7
8 # -- Path setup --------------------------------------------------------------
9
10 import os
11 import sys
12 sys.path.insert(0, os.path.abspath('../source'))
13 import sourceEnv # noqa: F401, E402
14
15 # Initialize languageHandler so that sphinx is able to deal with translatable strings.
16 import languageHandler # noqa: E402
17 languageHandler.setLanguage("en")
18
19 # Initialize globalvars.appArgs to something sensible.
20 import globalVars # noqa: E402
21
22
23 class AppArgs:
24 	# Set an empty config path
25 # This is never used as we don't initialize config, but some modules expect this to be set.
26 configPath = ""
27 secure = False
28 disableAddons = True
29 launcher = False
30
31
32 globalVars.appArgs = AppArgs()
33
34 # Import NVDA's versionInfo module.
35 import versionInfo # noqa: E402
36 # Set a suitable updateVersionType for the updateCheck module to be imported
37 versionInfo.updateVersionType = "stable"
38
39 # -- Project information -----------------------------------------------------
40
41 project = versionInfo.name
42 copyright = versionInfo.copyright
43 author = versionInfo.publisher
44
45 # The major project version
46 version = versionInfo.formatVersionForGUI(
47 versionInfo.version_year,
48 versionInfo.version_major,
49 versionInfo.version_minor
50 )
51
52 # The full version, including alpha/beta/rc tags
53 release = versionInfo.version
54
55 # -- General configuration ---------------------------------------------------
56
57 default_role = 'py:obj'
58
59 # Add any Sphinx extension module names here, as strings. They can be
60 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
61 # ones.
62 extensions = [
63 'sphinx.ext.autodoc',
64 ]
65
66 # Add any paths that contain templates here, relative to this directory.
67 templates_path = ['_templates']
68
69 # List of patterns, relative to source directory, that match files and
70 # directories to ignore when looking for source files.
71 # This pattern also affects html_static_path and html_extra_path.
72 exclude_patterns = [
73 "_build"
74 ]
75
76
77 # -- Options for HTML output -------------------------------------------------
78
79 # The theme to use for HTML and HTML Help pages.
80
81 html_theme = "sphinx_rtd_theme"
82
83 # Add any paths that contain custom static files (such as style sheets) here,
84 # relative to this directory. They are copied after the builtin static files,
85 # so a file named "default.css" will overwrite the builtin "default.css".
86 html_static_path = ['_static']
87
88 # -- Extension configuration -------------------------------------------------
89
90 # sphinx.ext.autodoc configuration
91
92 # Both the class’ and the __init__ method’s docstring are concatenated and inserted.
93 autoclass_content = "both"
94 autodoc_member_order = 'bysource'
95 autodoc_mock_imports = [
96 "louis", # Not our project
97 ]
98
99 # Perform some manual mocking of specific objects.
100 # autodoc can only mock modules, not objects.
101 from sphinx.ext.autodoc.mock import _make_subclass # noqa: E402
102
103 import config # noqa: E402
104 # Mock an instance of the configuration manager.
105 config.conf = _make_subclass("conf", "config")()
106
[end of devDocs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/devDocs/conf.py b/devDocs/conf.py
--- a/devDocs/conf.py
+++ b/devDocs/conf.py
@@ -1,5 +1,5 @@
# A part of NonVisual Desktop Access (NVDA)
-# Copyright (C) 2019 NV Access Limited, Leonard de Ruijter
+# Copyright (C) 2019-2020 NV Access Limited, Leonard de Ruijter, Joseph Lee
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
@@ -16,7 +16,7 @@
import languageHandler # noqa: E402
languageHandler.setLanguage("en")
-# Initialize globalvars.appArgs to something sensible.
+# Initialize globalVars.appArgs to something sensible.
import globalVars # noqa: E402
@@ -30,6 +30,11 @@
globalVars.appArgs = AppArgs()
+# #11971: NVDA is not running, therefore app dir is undefined.
+# Therefore tell NVDA that apt source directory is app dir.
+appDir = os.path.join("..", "source")
+globalVars.appDir = os.path.abspath(appDir)
+
# Import NVDA's versionInfo module.
import versionInfo # noqa: E402
| {"golden_diff": "diff --git a/devDocs/conf.py b/devDocs/conf.py\n--- a/devDocs/conf.py\n+++ b/devDocs/conf.py\n@@ -1,5 +1,5 @@\n # A part of NonVisual Desktop Access (NVDA)\n-# Copyright (C) 2019 NV Access Limited, Leonard de Ruijter\n+# Copyright (C) 2019-2020 NV Access Limited, Leonard de Ruijter, Joseph Lee\n # This file is covered by the GNU General Public License.\n # See the file COPYING for more details.\n \n@@ -16,7 +16,7 @@\n import languageHandler # noqa: E402\n languageHandler.setLanguage(\"en\")\n \n-# Initialize globalvars.appArgs to something sensible.\n+# Initialize globalVars.appArgs to something sensible.\n import globalVars # noqa: E402\n \n \n@@ -30,6 +30,11 @@\n \n \n globalVars.appArgs = AppArgs()\n+# #11971: NVDA is not running, therefore app dir is undefined.\n+# Therefore tell NVDA that apt source directory is app dir.\n+appDir = os.path.join(\"..\", \"source\")\n+globalVars.appDir = os.path.abspath(appDir)\n+\n \n # Import NVDA's versionInfo module.\n import versionInfo # noqa: E402\n", "issue": "Dev docs: globalVars.appDir is not defined when attempting to build docs with Sphinx\nHi,\r\nRelated to #11970 and actually blocks it:\r\n\r\n### Steps to reproduce:\r\nWhen trying to build dev docs using \"scons devDocs\":\r\n\r\n1. Run scons devDocs.\r\n2. Once Sphinx is instlaled and ready, Sphinx will try to build dev docs for the source code.\r\n\r\n### Actual behavior:\r\nA traceback shows up, ending with:\r\nAttributeError: module 'globalVars' has no attribute 'appDir'\r\n\r\n### Expected behavior:\r\nNo errors with the dev docs building completing.\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nSource\r\n\r\n#### NVDA version:\r\nAlpha-21561,7e5ffde2391c\r\n\r\n#### Windows version:\r\nWindows 10 Version 20H2 (build 19042.685)\r\n\r\n#### Name and version of other software in use when reproducing the issue:\r\nPython 3.7.9\r\n\r\n#### Other information about your system:\r\nN/A\r\n\r\n### Other questions\r\n#### Does the issue still occur after restarting your computer?\r\nYes\r\n\r\n#### Have you tried any other versions of NVDA? If so, please report their behaviors.\r\nNot applicable\r\n\r\n#### If addons are disabled, is your problem still occurring?\r\nNot applicable\r\n\r\n#### Did you try to run the COM registry fixing tool in NVDA menu / tools?\r\nNot applicable\r\n\r\n### Cause:\r\nThis is caused by config file error, specifically when a mock config.conf instance is created. 
Prior to this, importing config module fails because globalVars.appDir is not defined by the time scons devDocs is run.\r\n\r\n### Solution:\r\none solution is to define globalVars.appDir to point to the source directory.\r\n\r\nThanks.\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\n# Copyright (C) 2019 NV Access Limited, Leonard de Ruijter\n# This file is covered by the GNU General Public License.\n# See the file COPYING for more details.\n\n# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../source'))\nimport sourceEnv # noqa: F401, E402\n\n# Initialize languageHandler so that sphinx is able to deal with translatable strings.\nimport languageHandler # noqa: E402\nlanguageHandler.setLanguage(\"en\")\n\n# Initialize globalvars.appArgs to something sensible.\nimport globalVars # noqa: E402\n\n\nclass AppArgs:\n\t# Set an empty comnfig path\n\t# This is never used as we don't initialize config, but some modules expect this to be set.\n\tconfigPath = \"\"\n\tsecure = False\n\tdisableAddons = True\n\tlauncher = False\n\n\nglobalVars.appArgs = AppArgs()\n\n# Import NVDA's versionInfo module.\nimport versionInfo # noqa: E402\n# Set a suitable updateVersionType for the updateCheck module to be imported\nversionInfo.updateVersionType = \"stable\"\n\n# -- Project information -----------------------------------------------------\n\nproject = versionInfo.name\ncopyright = versionInfo.copyright\nauthor = versionInfo.publisher\n\n# The major project version\nversion = versionInfo.formatVersionForGUI(\n\tversionInfo.version_year,\n\tversionInfo.version_major,\n\tversionInfo.version_minor\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = versionInfo.version\n\n# -- General configuration ---------------------------------------------------\n\ndefault_role = 'py:obj'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n\t'sphinx.ext.autodoc',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\n\t\"_build\"\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.\n\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc configuration\n\n# Both the class\u2019 and the __init__ method\u2019s docstring are concatenated and inserted.\nautoclass_content = \"both\"\nautodoc_member_order = 'bysource'\nautodoc_mock_imports = [\n\t\"louis\", # Not our project\n]\n\n# Perform some manual mocking of specific objects.\n# autodoc can only mock modules, not objects.\nfrom sphinx.ext.autodoc.mock import _make_subclass # noqa: E402\n\nimport config # noqa: E402\n# Mock an instance of the configuration manager.\nconfig.conf = _make_subclass(\"conf\", \"config\")()\n", "path": "devDocs/conf.py"}]} | 1,865 | 292 |
gh_patches_debug_27466 | rasdani/github-patches | git_diff | vyperlang__vyper-543 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Question] Attack Vector described in Vipercoin's `approve` annotation
In [L89 of `vipercoin.v.py`](https://github.com/ethereum/viper/blob/master/examples/tokens/vipercoin.v.py#L89), the `approve` method has an annotation that begins like this:
>To prevent attack vectors like the one described here and discussed here,
I don't see any description of the attack vectors mentioned; perhaps there should be an external link here? Point me in the right direction and I can make the PR for it. :)
Thanks!
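For reference, the annotation points at the well-known `approve`/`transferFrom` front-running race: a spender who sees an allowance change pending can spend the old allowance before the new one takes effect, ending up with both. The comment deliberately leaves enforcement to clients, but a hedged, Viper-style sketch (not the shipped contract) of the usual on-chain guard looks like this:

```python
@public
def approve(_spender: address, _amount: num(num256)) -> bool:
    # Require resetting a non-zero allowance to 0 before granting a new
    # non-zero amount, so a pending spend cannot ride the change.
    assert _amount == 0 or self.allowed[msg.sender][_spender] == 0
    self.allowed[msg.sender][_spender] = _amount
    log.Approval(msg.sender, _spender, as_num256(_amount))
    return True
```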
</issue>
<code>
[start of examples/tokens/vipercoin.v.py]
1 # Viper Port of MyToken
2 # THIS CONTRACT HAS NOT BEEN AUDITED!
3 # ERC20 details at:
4 # https://theethereum.wiki/w/index.php/ERC20_Token_Standard
5 # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md
6 # Events of the token.
7 Transfer: __log__({_from: indexed(address), _to: indexed(address), _value: num256})
8 Approval: __log__({_owner: indexed(address), _spender: indexed(address), _value: num256})
9
10
11 # Variables of the token.
12 name: bytes32
13 symbol: bytes32
14 totalSupply: num
15 decimals: num
16 balances: num[address]
17 allowed: num[address][address]
18
19 @public
20 def __init__(_name: bytes32, _symbol: bytes32, _decimals: num, _initialSupply: num):
21
22 self.name = _name
23 self.symbol = _symbol
24 self.decimals = _decimals
25 self.totalSupply = _initialSupply * 10 ** _decimals
26 self.balances[msg.sender] = self.totalSupply
27
28 @public
29 @constant
30 def symbol() -> bytes32:
31
32 return self.symbol
33
34 @public
35 @constant
36 def name() -> bytes32:
37
38 return self.name
39
40
41 # What is the balance of a particular account?
42 @public
43 @constant
44 def balanceOf(_owner: address) -> num256:
45
46 return as_num256(self.balances[_owner])
47
48
49 # Return total supply of token.
50 @public
51 @constant
52 def totalSupply() -> num256:
53
54 return as_num256(self.totalSupply)
55
56
57 # Send `_value` tokens to `_to` from your account
58 @public
59 def transfer(_to: address, _amount: num(num256)) -> bool:
60
61 assert self.balances[msg.sender] >= _amount
62 assert self.balances[_to] + _amount >= self.balances[_to]
63
64 self.balances[msg.sender] -= _amount # Subtract from the sender
65 self.balances[_to] += _amount # Add the same to the recipient
66 log.Transfer(msg.sender, _to, as_num256(_amount)) # log transfer event.
67
68 return True
69
70
71 # Transfer allowed tokens from a specific account to another.
72 @public
73 def transferFrom(_from: address, _to: address, _value: num(num256)) -> bool:
74
75 assert _value <= self.allowed[_from][msg.sender]
76 assert _value <= self.balances[_from]
77
78 self.balances[_from] -= _value # decrease balance of from address.
79 self.allowed[_from][msg.sender] -= _value # decrease allowance.
80 self.balances[_to] += _value # incease balance of to address.
81 log.Transfer(_from, _to, as_num256(_value)) # log transfer event.
82
83 return True
84
85
86 # Allow _spender to withdraw from your account, multiple times, up to the _value amount.
87 # If this function is called again it overwrites the current allowance with _value.
88 #
89 # NOTE: To prevent attack vectors like the one described here and discussed here,
90 # clients SHOULD make sure to create user interfaces in such a way that they
91 # set the allowance first to 0 before setting it to another value for the
92 # same spender. THOUGH The contract itself shouldn't enforce it, to allow
93 # backwards compatilibilty with contracts deployed before.
94 #
95 @public
96 def approve(_spender: address, _amount: num(num256)) -> bool:
97
98 self.allowed[msg.sender][_spender] = _amount
99 log.Approval(msg.sender, _spender, as_num256(_amount))
100
101 return True
102
103
104 # Get the allowance an address has to spend anothers' token.
105 @public
106 def allowance(_owner: address, _spender: address) -> num256:
107
108 return as_num256(self.allowed[_owner][_spender])
109
[end of examples/tokens/vipercoin.v.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/tokens/vipercoin.v.py b/examples/tokens/vipercoin.v.py
--- a/examples/tokens/vipercoin.v.py
+++ b/examples/tokens/vipercoin.v.py
@@ -86,12 +86,15 @@
# Allow _spender to withdraw from your account, multiple times, up to the _value amount.
# If this function is called again it overwrites the current allowance with _value.
#
-# NOTE: To prevent attack vectors like the one described here and discussed here,
-# clients SHOULD make sure to create user interfaces in such a way that they
+# NOTE: We would like to prevent attack vectors like the one described here:
+# https://docs.google.com/document/d/1YLPtQxZu1UAvO9cZ1O2RPXBbT0mooh4DYKjA_jp-RLM/edit#heading=h.m9fhqynw2xvt
+# and discussed here:
+# https://github.com/ethereum/EIPs/issues/20#issuecomment-263524729
+#
+# Clients SHOULD make sure to create user interfaces in such a way that they
# set the allowance first to 0 before setting it to another value for the
# same spender. THOUGH The contract itself shouldn't enforce it, to allow
# backwards compatilibilty with contracts deployed before.
-#
@public
def approve(_spender: address, _amount: num(num256)) -> bool:
@@ -101,7 +104,7 @@
return True
-# Get the allowance an address has to spend anothers' token.
+# Get the allowance an address has to spend another's token.
@public
def allowance(_owner: address, _spender: address) -> num256:
| {"golden_diff": "diff --git a/examples/tokens/vipercoin.v.py b/examples/tokens/vipercoin.v.py\n--- a/examples/tokens/vipercoin.v.py\n+++ b/examples/tokens/vipercoin.v.py\n@@ -86,12 +86,15 @@\n # Allow _spender to withdraw from your account, multiple times, up to the _value amount.\n # If this function is called again it overwrites the current allowance with _value.\n #\n-# NOTE: To prevent attack vectors like the one described here and discussed here,\n-# clients SHOULD make sure to create user interfaces in such a way that they\n+# NOTE: We would like to prevent attack vectors like the one described here:\n+# https://docs.google.com/document/d/1YLPtQxZu1UAvO9cZ1O2RPXBbT0mooh4DYKjA_jp-RLM/edit#heading=h.m9fhqynw2xvt\n+# and discussed here:\n+# https://github.com/ethereum/EIPs/issues/20#issuecomment-263524729\n+#\n+# Clients SHOULD make sure to create user interfaces in such a way that they\n # set the allowance first to 0 before setting it to another value for the\n # same spender. THOUGH The contract itself shouldn't enforce it, to allow\n # backwards compatilibilty with contracts deployed before.\n-#\n @public\n def approve(_spender: address, _amount: num(num256)) -> bool:\n \n@@ -101,7 +104,7 @@\n return True\n \n \n-# Get the allowance an address has to spend anothers' token.\n+# Get the allowance an address has to spend another's token.\n @public\n def allowance(_owner: address, _spender: address) -> num256:\n", "issue": "[Question] Attack Vector described in Vipercoin's `approve` annotation\nIn [L89 of `vipercoin.v.py`](https://github.com/ethereum/viper/blob/master/examples/tokens/vipercoin.v.py#L89), the `approve` method has an annotation that begins like this\r\n\r\n>To prevent attack vectors like the one described here and discussed here,\r\n\r\nI don't see any description of the attack vectors described, perhaps there should be an external link here? Point me in the right direction and I can make the PR for it. 
:)\r\n\r\nThanks!\n", "before_files": [{"content": "# Viper Port of MyToken\n# THIS CONTRACT HAS NOT BEEN AUDITED!\n# ERC20 details at:\n# https://theethereum.wiki/w/index.php/ERC20_Token_Standard\n# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md\n# Events of the token.\nTransfer: __log__({_from: indexed(address), _to: indexed(address), _value: num256})\nApproval: __log__({_owner: indexed(address), _spender: indexed(address), _value: num256})\n\n\n# Variables of the token.\nname: bytes32\nsymbol: bytes32\ntotalSupply: num\ndecimals: num\nbalances: num[address]\nallowed: num[address][address]\n\n@public\ndef __init__(_name: bytes32, _symbol: bytes32, _decimals: num, _initialSupply: num):\n \n self.name = _name\n self.symbol = _symbol\n self.decimals = _decimals\n self.totalSupply = _initialSupply * 10 ** _decimals\n self.balances[msg.sender] = self.totalSupply\n\n@public\n@constant\ndef symbol() -> bytes32:\n\n return self.symbol\n\n@public\n@constant\ndef name() -> bytes32:\n\n return self.name\n\n\n# What is the balance of a particular account?\n@public\n@constant\ndef balanceOf(_owner: address) -> num256:\n\n return as_num256(self.balances[_owner])\n\n\n# Return total supply of token.\n@public\n@constant\ndef totalSupply() -> num256:\n\n return as_num256(self.totalSupply)\n\n\n# Send `_value` tokens to `_to` from your account\n@public\ndef transfer(_to: address, _amount: num(num256)) -> bool:\n\n assert self.balances[msg.sender] >= _amount\n assert self.balances[_to] + _amount >= self.balances[_to]\n\n self.balances[msg.sender] -= _amount # Subtract from the sender\n self.balances[_to] += _amount # Add the same to the recipient\n log.Transfer(msg.sender, _to, as_num256(_amount)) # log transfer event.\n\n return True\n\n\n# Transfer allowed tokens from a specific account to another.\n@public\ndef transferFrom(_from: address, _to: address, _value: num(num256)) -> bool:\n\n assert _value <= self.allowed[_from][msg.sender]\n assert _value <= self.balances[_from]\n\n self.balances[_from] -= _value # decrease balance of from address.\n self.allowed[_from][msg.sender] -= _value # decrease allowance.\n self.balances[_to] += _value # incease balance of to address.\n log.Transfer(_from, _to, as_num256(_value)) # log transfer event.\n \n return True\n\n\n# Allow _spender to withdraw from your account, multiple times, up to the _value amount.\n# If this function is called again it overwrites the current allowance with _value.\n#\n# NOTE: To prevent attack vectors like the one described here and discussed here,\n# clients SHOULD make sure to create user interfaces in such a way that they\n# set the allowance first to 0 before setting it to another value for the\n# same spender. THOUGH The contract itself shouldn't enforce it, to allow\n# backwards compatilibilty with contracts deployed before.\n#\n@public\ndef approve(_spender: address, _amount: num(num256)) -> bool:\n\n self.allowed[msg.sender][_spender] = _amount\n log.Approval(msg.sender, _spender, as_num256(_amount))\n\n return True\n\n\n# Get the allowance an address has to spend anothers' token.\n@public\ndef allowance(_owner: address, _spender: address) -> num256:\n\n return as_num256(self.allowed[_owner][_spender])\n", "path": "examples/tokens/vipercoin.v.py"}]} | 1,783 | 401 |
gh_patches_debug_22788 | rasdani/github-patches | git_diff | CTPUG__wafer-193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove 'unicode' calls from wafer
Current wafer, when run on Python 3, fails on several admin tasks because `UserProfile.__str__` tries to call `unicode`, which does not exist there.
We should handle the difference between Python 2 and Python 3 correctly in this situation.
There are a couple of other calls to `unicode()` in the error paths in /registration/views.py that look dangerous and should probably be fixed as well.
</issue>
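For a minimal, self-contained illustration of why this breaks (assuming plain CPython semantics): the `unicode` builtin only exists on Python 2, while the `u'%s' %` formatting used in the patch below works on both major versions.

```python
class Dummy:
    def __str__(self):
        return "dummy"

obj = Dummy()
print(u'%s' % obj)     # portable: works on Python 2 and Python 3
# print(unicode(obj))  # NameError on Python 3: name 'unicode' is not defined
```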
<code>
[start of wafer/users/models.py]
1 from django.contrib.auth.models import User
2 from django.db import models
3 from django.db.models.signals import post_save
4 from django.utils.encoding import python_2_unicode_compatible
5
6 from libravatar import libravatar_url
7 try:
8 from urllib2 import urlparse
9 except ImportError:
10 from urllib import parse as urlparse
11 from django.utils.http import urlquote
12
13 from wafer.talks.models import ACCEPTED, PENDING
14
15
16 @python_2_unicode_compatible
17 class UserProfile(models.Model):
18 user = models.OneToOneField(User)
19 contact_number = models.CharField(max_length=16, null=True, blank=True)
20 bio = models.TextField(null=True, blank=True)
21
22 homepage = models.CharField(max_length=256, null=True, blank=True)
23 # We should probably do social auth instead
24 # And care about other code hosting sites...
25 twitter_handle = models.CharField(max_length=15, null=True, blank=True)
26 github_username = models.CharField(max_length=32, null=True, blank=True)
27
28 def __str__(self):
29 return unicode(self.user)
30
31 def accepted_talks(self):
32 return self.user.talks.filter(status=ACCEPTED)
33
34 def pending_talks(self):
35 return self.user.talks.filter(status=PENDING)
36
37 def avatar_url(self, size=96, https=True, default='mm'):
38 if not self.user.email:
39 return None
40 return libravatar_url(self.user.email, size=size, https=https,
41 default=default)
42
43 def homepage_url(self):
44 """Try ensure we prepend http: to the url if there's nothing there
45
46 This is to ensure we're not generating relative links in the
47 user templates."""
48 if not self.homepage:
49 return self.homepage
50 parsed = urlparse.urlparse(self.homepage)
51 if parsed.scheme:
52 return self.homepage
53 # Vague sanity check
54 abs_url = ''.join(['http://', self.homepage])
55 if urlparse.urlparse(abs_url).scheme == 'http':
56 return abs_url
57 return self.homepage
58
59 def display_name(self):
60 return self.user.get_full_name() or self.user.username
61
62
63 def create_user_profile(sender, instance, created, raw=False, **kwargs):
64 if raw:
65 return
66 if created:
67 UserProfile.objects.create(user=instance)
68
69 post_save.connect(create_user_profile, sender=User)
70
[end of wafer/users/models.py]
[start of wafer/registration/views.py]
1 import urllib
2
3 from django.contrib.auth import login
4 from django.contrib import messages
5 from django.core.urlresolvers import reverse
6 from django.conf import settings
7 from django.http import Http404, HttpResponseRedirect
8
9 from wafer.registration.sso import SSOError, debian_sso, github_sso
10
11
12 def redirect_profile(request):
13 '''
14 The default destination from logging in, redirect to the actual profile URL
15 '''
16 if request.user.is_authenticated():
17 return HttpResponseRedirect(reverse('wafer_user_profile',
18 args=(request.user.username,)))
19 else:
20 return HttpResponseRedirect(reverse('wafer_page', args=('index',)))
21
22
23 def github_login(request):
24 if 'github' not in settings.WAFER_SSO:
25 raise Http404()
26
27 if 'code' not in request.GET:
28 return HttpResponseRedirect(
29 'https://github.com/login/oauth/authorize?' + urllib.urlencode({
30 'client_id': settings.WAFER_GITHUB_CLIENT_ID,
31 'redirect_uri': request.build_absolute_uri(
32 reverse(github_login)),
33 'scope': 'user:email',
34 'state': request.META['CSRF_COOKIE'],
35 }))
36
37 try:
38 if request.GET['state'] != request.META['CSRF_COOKIE']:
39 raise SSOError('Incorrect state')
40
41 user = github_sso(request.GET['code'])
42 except SSOError as e:
43 messages.error(request, unicode(e))
44 return HttpResponseRedirect(reverse('auth_login'))
45
46 login(request, user)
47 return redirect_profile(request)
48
49
50 def debian_login(request):
51 if 'debian' not in settings.WAFER_SSO:
52 raise Http404()
53
54 try:
55 user = debian_sso(request.META)
56 except SSOError as e:
57 messages.error(request, unicode(e))
58 return HttpResponseRedirect(reverse('auth_login'))
59
60 login(request, user)
61 return redirect_profile(request)
62
[end of wafer/registration/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wafer/registration/views.py b/wafer/registration/views.py
--- a/wafer/registration/views.py
+++ b/wafer/registration/views.py
@@ -40,7 +40,7 @@
user = github_sso(request.GET['code'])
except SSOError as e:
- messages.error(request, unicode(e))
+ messages.error(request, u'%s' % e)
return HttpResponseRedirect(reverse('auth_login'))
login(request, user)
@@ -54,7 +54,7 @@
try:
user = debian_sso(request.META)
except SSOError as e:
- messages.error(request, unicode(e))
+ messages.error(request, u'%s' % e)
return HttpResponseRedirect(reverse('auth_login'))
login(request, user)
diff --git a/wafer/users/models.py b/wafer/users/models.py
--- a/wafer/users/models.py
+++ b/wafer/users/models.py
@@ -26,7 +26,7 @@
github_username = models.CharField(max_length=32, null=True, blank=True)
def __str__(self):
- return unicode(self.user)
+ return u'%s' % self.user
def accepted_talks(self):
return self.user.talks.filter(status=ACCEPTED)
| {"golden_diff": "diff --git a/wafer/registration/views.py b/wafer/registration/views.py\n--- a/wafer/registration/views.py\n+++ b/wafer/registration/views.py\n@@ -40,7 +40,7 @@\n \n user = github_sso(request.GET['code'])\n except SSOError as e:\n- messages.error(request, unicode(e))\n+ messages.error(request, u'%s' % e)\n return HttpResponseRedirect(reverse('auth_login'))\n \n login(request, user)\n@@ -54,7 +54,7 @@\n try:\n user = debian_sso(request.META)\n except SSOError as e:\n- messages.error(request, unicode(e))\n+ messages.error(request, u'%s' % e)\n return HttpResponseRedirect(reverse('auth_login'))\n \n login(request, user)\ndiff --git a/wafer/users/models.py b/wafer/users/models.py\n--- a/wafer/users/models.py\n+++ b/wafer/users/models.py\n@@ -26,7 +26,7 @@\n github_username = models.CharField(max_length=32, null=True, blank=True)\n \n def __str__(self):\n- return unicode(self.user)\n+ return u'%s' % self.user\n \n def accepted_talks(self):\n return self.user.talks.filter(status=ACCEPTED)\n", "issue": "Remove'unicode' calls from wafer\nCurrent wafer using python 3 fails on several admin tasks because `UserProfile.__str__` tries to call `unicode`, which is obviously not defined.\n\nWe should handle the difference between python 2 and python 3 correctly in this situation.\n\nThere are a couple of other calls to unicode() that look dangerous in the error paths in /registration/views.py that should probably be fixed as well.\n\n", "before_files": [{"content": "from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.utils.encoding import python_2_unicode_compatible\n\nfrom libravatar import libravatar_url\ntry:\n from urllib2 import urlparse\nexcept ImportError:\n from urllib import parse as urlparse\nfrom django.utils.http import urlquote\n\nfrom wafer.talks.models import ACCEPTED, PENDING\n\n\n@python_2_unicode_compatible\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n contact_number = models.CharField(max_length=16, null=True, blank=True)\n bio = models.TextField(null=True, blank=True)\n\n homepage = models.CharField(max_length=256, null=True, blank=True)\n # We should probably do social auth instead\n # And care about other code hosting sites...\n twitter_handle = models.CharField(max_length=15, null=True, blank=True)\n github_username = models.CharField(max_length=32, null=True, blank=True)\n\n def __str__(self):\n return unicode(self.user)\n\n def accepted_talks(self):\n return self.user.talks.filter(status=ACCEPTED)\n\n def pending_talks(self):\n return self.user.talks.filter(status=PENDING)\n\n def avatar_url(self, size=96, https=True, default='mm'):\n if not self.user.email:\n return None\n return libravatar_url(self.user.email, size=size, https=https,\n default=default)\n\n def homepage_url(self):\n \"\"\"Try ensure we prepend http: to the url if there's nothing there\n\n This is to ensure we're not generating relative links in the\n user templates.\"\"\"\n if not self.homepage:\n return self.homepage\n parsed = urlparse.urlparse(self.homepage)\n if parsed.scheme:\n return self.homepage\n # Vague sanity check\n abs_url = ''.join(['http://', self.homepage])\n if urlparse.urlparse(abs_url).scheme == 'http':\n return abs_url\n return self.homepage\n\n def display_name(self):\n return self.user.get_full_name() or self.user.username\n\n\ndef create_user_profile(sender, instance, created, raw=False, **kwargs):\n if raw:\n return\n if created:\n 
UserProfile.objects.create(user=instance)\n\npost_save.connect(create_user_profile, sender=User)\n", "path": "wafer/users/models.py"}, {"content": "import urllib\n\nfrom django.contrib.auth import login\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.http import Http404, HttpResponseRedirect\n\nfrom wafer.registration.sso import SSOError, debian_sso, github_sso\n\n\ndef redirect_profile(request):\n '''\n The default destination from logging in, redirect to the actual profile URL\n '''\n if request.user.is_authenticated():\n return HttpResponseRedirect(reverse('wafer_user_profile',\n args=(request.user.username,)))\n else:\n return HttpResponseRedirect(reverse('wafer_page', args=('index',)))\n\n\ndef github_login(request):\n if 'github' not in settings.WAFER_SSO:\n raise Http404()\n\n if 'code' not in request.GET:\n return HttpResponseRedirect(\n 'https://github.com/login/oauth/authorize?' + urllib.urlencode({\n 'client_id': settings.WAFER_GITHUB_CLIENT_ID,\n 'redirect_uri': request.build_absolute_uri(\n reverse(github_login)),\n 'scope': 'user:email',\n 'state': request.META['CSRF_COOKIE'],\n }))\n\n try:\n if request.GET['state'] != request.META['CSRF_COOKIE']:\n raise SSOError('Incorrect state')\n\n user = github_sso(request.GET['code'])\n except SSOError as e:\n messages.error(request, unicode(e))\n return HttpResponseRedirect(reverse('auth_login'))\n\n login(request, user)\n return redirect_profile(request)\n\n\ndef debian_login(request):\n if 'debian' not in settings.WAFER_SSO:\n raise Http404()\n\n try:\n user = debian_sso(request.META)\n except SSOError as e:\n messages.error(request, unicode(e))\n return HttpResponseRedirect(reverse('auth_login'))\n\n login(request, user)\n return redirect_profile(request)\n", "path": "wafer/registration/views.py"}]} | 1,799 | 293 |
gh_patches_debug_11094 | rasdani/github-patches | git_diff | facebookresearch__dynabench-766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Creating a task with the "Task Code" as a number doesn't work as expected.
After creating a task with the task code as a number and accepting the task, navigating to that task should ideally show a page which says "The task owner still needs to activate this task." In this case, however, that page appears for a millisecond and we are taken back to the home page, which I think is unexpected behaviour.
A demonstration is given in the following screen recording of the same issue.
**Steps to reproduce**:
- Create a task proposal with the "Task Code" field as a number
- Accept the task as the admin user.
- Now try to click on the respective task from your "Tasks" page. It will just take you back to the homepage.
This seems to happen only for a purely numeric "Task Code" and not for an alphanumeric "Task Code".
https://user-images.githubusercontent.com/48560219/135757335-d98f116f-b7d6-44dc-a1fd-0c8b6fac7c61.mov
</issue>
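The validation regex in `create_task_proposal` (`^[a-zA-Z0-9_-]*$`) happily accepts purely numeric codes, which is presumably what trips up the task routing. A small sketch of the lookahead-based check that additionally requires at least one letter, mirroring the pattern in the patch below:

```python
import re

# Allows only letters, digits, underscores and dashes, and additionally
# requires at least one letter somewhere in the string.
TASK_CODE_RE = re.compile(r"(?=^[a-zA-Z0-9_-]*$)(?=.*[a-zA-Z].*).*$")

for code in ["1234", "mnist", "task-1", "bad code!"]:
    print(code, bool(TASK_CODE_RE.search(code)))
# "1234" and "bad code!" are rejected; the other two pass.
```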
<code>
[start of api/controllers/task_proposals.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 # This source code is licensed under the MIT license found in the
3 # LICENSE file in the root directory of this source tree.
4
5 import re
6
7 import bottle
8
9 import common.auth as _auth
10 import common.helpers as util
11 from common.logging import logger
12 from models.base import DBSession as dbs
13 from models.task import TaskModel
14 from models.task_proposal import TaskProposal, TaskProposalModel
15 from models.user import UserModel
16
17
18 @bottle.get("/task_proposals/user/<page:int>/<limit:int>")
19 @_auth.requires_auth
20 def get_user_task_proposals(credentials, page, limit):
21 tpm = TaskProposalModel()
22 proposals = tpm.getByUid(credentials["id"])
23 identifiers = []
24 for proposal in proposals:
25 identifiers.append(proposal.to_dict())
26 return util.json_encode(
27 {
28 "data": identifiers[page * limit : page * limit + limit],
29 "count": len(identifiers),
30 }
31 )
32
33
34 @bottle.get("/task_proposals/all/<page:int>/<limit:int>")
35 @_auth.requires_auth
36 def get_all_task_proposals(credentials, page, limit):
37 um = UserModel()
38 user = um.get(credentials["id"])
39 if not user.admin:
40 bottle.abort(403, "Access denied")
41
42 proposals = dbs.query(TaskProposal)
43 identifiers = []
44 for proposal in proposals:
45 identifiers.append(proposal.to_dict())
46 return util.json_encode(
47 {
48 "data": identifiers[page * limit : page * limit + limit],
49 "count": len(identifiers),
50 }
51 )
52
53
54 @bottle.post("/task_proposals/create")
55 @_auth.requires_auth
56 def create_task_proposal(credentials):
57 data = bottle.request.json
58
59 if not util.check_fields(data, ["task_code", "name", "desc", "longdesc"]):
60 bottle.abort(400, "Missing data")
61
62 tm = TaskModel()
63 if tm.getByTaskCode(data["task_code"]):
64 bottle.abort(400, "Invalid task code; this task code is already taken")
65
66 if tm.getByName(data["name"]):
67 bottle.abort(400, "Invalid name; this name is already taken")
68
69 if not bool(re.search("^[a-zA-Z0-9_-]*$", data["task_code"])):
70 bottle.abort(
71 400,
72 "Invalid task code (no special characters allowed besides underscores "
73 + "and dashes)",
74 )
75
76 try:
77 tp = TaskProposal(
78 uid=credentials["id"],
79 task_code=data["task_code"],
80 name=data["name"],
81 desc=data["desc"],
82 longdesc=data["longdesc"],
83 )
84
85 tm.dbs.add(tp)
86 tm.dbs.flush()
87 tm.dbs.commit()
88 logger.info("Added task proposal (%s)" % (tp.id))
89
90 except Exception as error_message:
91 logger.error("Could not create task proposal (%s)" % error_message)
92 return False
93
94 return util.json_encode({"success": "ok", "id": tp.id})
95
[end of api/controllers/task_proposals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/controllers/task_proposals.py b/api/controllers/task_proposals.py
--- a/api/controllers/task_proposals.py
+++ b/api/controllers/task_proposals.py
@@ -66,11 +66,13 @@
if tm.getByName(data["name"]):
bottle.abort(400, "Invalid name; this name is already taken")
- if not bool(re.search("^[a-zA-Z0-9_-]*$", data["task_code"])):
+ if not bool(
+ re.search("(?=^[a-zA-Z0-9_-]*$)(?=.*[a-zA-Z].*).*$", data["task_code"])
+ ):
bottle.abort(
400,
"Invalid task code (no special characters allowed besides underscores "
- + "and dashes)",
+ + "and dashes. At least one letter required)",
)
try:
| {"golden_diff": "diff --git a/api/controllers/task_proposals.py b/api/controllers/task_proposals.py\n--- a/api/controllers/task_proposals.py\n+++ b/api/controllers/task_proposals.py\n@@ -66,11 +66,13 @@\n if tm.getByName(data[\"name\"]):\n bottle.abort(400, \"Invalid name; this name is already taken\")\n \n- if not bool(re.search(\"^[a-zA-Z0-9_-]*$\", data[\"task_code\"])):\n+ if not bool(\n+ re.search(\"(?=^[a-zA-Z0-9_-]*$)(?=.*[a-zA-Z].*).*$\", data[\"task_code\"])\n+ ):\n bottle.abort(\n 400,\n \"Invalid task code (no special characters allowed besides underscores \"\n- + \"and dashes)\",\n+ + \"and dashes. At least one letter required)\",\n )\n \n try:\n", "issue": "Creating a task with the \"Task Code\" as a number doesn't work as expected.\nAfter creating a task with the task code as a number, and accepting the task, when users want to navigate to the task, it should ideally take us to a page which says \"The task owner still needs to activate this task.\", but in this case, we are shown the respective page for a millisecond, and taken back to the home page, which I think is unexpected behaviour.\r\n\r\nA demonstration is given in the following screen recording of the same issue.\r\n\r\n**Steps to reproduce**:\r\n- Create a task proposal with the \"Task Code\" field as a number\r\n- Accept the task as the admin user.\r\n- Now try to click on the respective task from your \"Tasks\" page. It should just take you back to the homepage.\r\n\r\nThis seems to happen only for a purely numeric \"Task Code\" and not for an alphanumeric \"Task Code\"\r\n\r\nhttps://user-images.githubusercontent.com/48560219/135757335-d98f116f-b7d6-44dc-a1fd-0c8b6fac7c61.mov\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport re\n\nimport bottle\n\nimport common.auth as _auth\nimport common.helpers as util\nfrom common.logging import logger\nfrom models.base import DBSession as dbs\nfrom models.task import TaskModel\nfrom models.task_proposal import TaskProposal, TaskProposalModel\nfrom models.user import UserModel\n\n\[email protected](\"/task_proposals/user/<page:int>/<limit:int>\")\n@_auth.requires_auth\ndef get_user_task_proposals(credentials, page, limit):\n tpm = TaskProposalModel()\n proposals = tpm.getByUid(credentials[\"id\"])\n identifiers = []\n for proposal in proposals:\n identifiers.append(proposal.to_dict())\n return util.json_encode(\n {\n \"data\": identifiers[page * limit : page * limit + limit],\n \"count\": len(identifiers),\n }\n )\n\n\[email protected](\"/task_proposals/all/<page:int>/<limit:int>\")\n@_auth.requires_auth\ndef get_all_task_proposals(credentials, page, limit):\n um = UserModel()\n user = um.get(credentials[\"id\"])\n if not user.admin:\n bottle.abort(403, \"Access denied\")\n\n proposals = dbs.query(TaskProposal)\n identifiers = []\n for proposal in proposals:\n identifiers.append(proposal.to_dict())\n return util.json_encode(\n {\n \"data\": identifiers[page * limit : page * limit + limit],\n \"count\": len(identifiers),\n }\n )\n\n\[email protected](\"/task_proposals/create\")\n@_auth.requires_auth\ndef create_task_proposal(credentials):\n data = bottle.request.json\n\n if not util.check_fields(data, [\"task_code\", \"name\", \"desc\", \"longdesc\"]):\n bottle.abort(400, \"Missing data\")\n\n tm = TaskModel()\n if tm.getByTaskCode(data[\"task_code\"]):\n bottle.abort(400, \"Invalid task code; this 
task code is already taken\")\n\n if tm.getByName(data[\"name\"]):\n bottle.abort(400, \"Invalid name; this name is already taken\")\n\n if not bool(re.search(\"^[a-zA-Z0-9_-]*$\", data[\"task_code\"])):\n bottle.abort(\n 400,\n \"Invalid task code (no special characters allowed besides underscores \"\n + \"and dashes)\",\n )\n\n try:\n tp = TaskProposal(\n uid=credentials[\"id\"],\n task_code=data[\"task_code\"],\n name=data[\"name\"],\n desc=data[\"desc\"],\n longdesc=data[\"longdesc\"],\n )\n\n tm.dbs.add(tp)\n tm.dbs.flush()\n tm.dbs.commit()\n logger.info(\"Added task proposal (%s)\" % (tp.id))\n\n except Exception as error_message:\n logger.error(\"Could not create task proposal (%s)\" % error_message)\n return False\n\n return util.json_encode({\"success\": \"ok\", \"id\": tp.id})\n", "path": "api/controllers/task_proposals.py"}]} | 1,625 | 194 |
gh_patches_debug_4002 | rasdani/github-patches | git_diff | pypa__cibuildwheel-199 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cibuildwheel CI tests failing on Azure for windows
`cibuildwheel` CI tests which use the sample configuration in the README are failing on Windows following the Azure update that added Python 3.8 support.
Given the number of CI providers now tested, I guess we can try to test `cibuildwheel` on Python 2.7, 3.5, 3.6, 3.7 and 3.8 without too much overhead on test time by dispatching the Python versions running `cibuildwheel` across CI providers.
</issue>
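Independent of the CI matrix question, declaring the supported interpreters in packaging metadata keeps `pip` from installing the package on a Python it cannot run on. A short sketch of the relevant `setup()` field (the package name here is illustrative; the real change is in the patch below):

```python
from setuptools import setup

setup(
    name='example-package',  # illustrative name
    version='0.0.1',
    # pip checks this before installing and refuses unsupported interpreters.
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
)
```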
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 try:
5 from setuptools import setup
6 except ImportError:
7 from distutils.core import setup
8
9 setup(
10 name='cibuildwheel',
11 version='0.12.0',
12 install_requires=['bashlex!=0.13'],
13 description="Build Python wheels on CI with minimal configuration.",
14 long_description='For readme please see http://github.com/joerick/cibuildwheel',
15 author="Joe Rickerby",
16 author_email='[email protected]',
17 url='https://github.com/joerick/cibuildwheel',
18 packages=['cibuildwheel',],
19 license="BSD",
20 zip_safe=False,
21 package_data={
22 'cibuildwheel': ['resources/*'],
23 },
24 keywords='ci wheel packaging pypi travis appveyor macos linux windows',
25 classifiers=[
26 'Intended Audience :: Developers',
27 'Natural Language :: English',
28 'Programming Language :: Python :: 2',
29 'Programming Language :: Python :: 3',
30 'Development Status :: 4 - Beta',
31 'License :: OSI Approved :: BSD License',
32 'Programming Language :: Python :: Implementation :: CPython',
33 'Topic :: Software Development :: Build Tools',
34 ],
35 entry_points={
36 'console_scripts': [
37 'cibuildwheel = cibuildwheel.__main__:main',
38 ],
39 },
40 )
41
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,6 +21,8 @@
package_data={
'cibuildwheel': ['resources/*'],
},
+ # Supported python versions
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
keywords='ci wheel packaging pypi travis appveyor macos linux windows',
classifiers=[
'Intended Audience :: Developers',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,6 +21,8 @@\n package_data={\n 'cibuildwheel': ['resources/*'],\n },\n+ # Supported python versions\n+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n keywords='ci wheel packaging pypi travis appveyor macos linux windows',\n classifiers=[\n 'Intended Audience :: Developers',\n", "issue": "cibuildwheel CI tests failing on Azure for windows\n`cibuildwheel` CI tests which are using the sample configuration in README are failing on Windows following Azure update to support python 3.8\r\n\r\nGiven the number of CI providers now tested, I guess we can try to test `cibuildwheel` on python 2.7, 3.5, 3.6, 3.7 and 3.8 without too much overhead on test time by dispatching the python versions running `cibuildwheel` across CI providers.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nsetup(\n name='cibuildwheel',\n version='0.12.0',\n install_requires=['bashlex!=0.13'],\n description=\"Build Python wheels on CI with minimal configuration.\",\n long_description='For readme please see http://github.com/joerick/cibuildwheel',\n author=\"Joe Rickerby\",\n author_email='[email protected]',\n url='https://github.com/joerick/cibuildwheel',\n packages=['cibuildwheel',],\n license=\"BSD\",\n zip_safe=False,\n package_data={\n 'cibuildwheel': ['resources/*'],\n },\n keywords='ci wheel packaging pypi travis appveyor macos linux windows',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Software Development :: Build Tools',\n ],\n entry_points={\n 'console_scripts': [\n 'cibuildwheel = cibuildwheel.__main__:main',\n ],\n },\n)\n", "path": "setup.py"}]} | 1,022 | 121 |
gh_patches_debug_65044 | rasdani/github-patches | git_diff | kserve__kserve-1583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CommandException: No URLs matched: gs://kfserving-examples/models/mnist
/kind bug
I would like to run the Kafka MNIST example, but when I run:
```bash
gsutil cp gs://kfserving-examples/models/mnist .
```
As per the readme, I get
```
CommandException: No URLs matched: gs://kfserving-examples/models/mnist
```
**What did you expect to happen:**
I expected to be able to download the model checkpoint.
</issue>
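When `gsutil cp` reports "No URLs matched", listing the prefix is the quickest way to see what actually exists under it. A hedged sketch using the public `google-cloud-storage` client with anonymous access (whether this bucket permits anonymous listing is an assumption):

```python
from google.cloud import storage

client = storage.Client.create_anonymous_client()
for blob in client.list_blobs("kfserving-examples", prefix="models/"):
    # Shows the real object paths under gs://kfserving-examples/models/
    print(blob.name)
```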
<code>
[start of docs/samples/kafka/setup.py]
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup, find_packages
16
17 tests_require = [
18 'pytest',
19 'pytest-tornasync',
20 'mypy'
21 ]
22
23 setup(
24 name='transformer',
25 version='0.1.0',
26 author_email='[email protected]',
27 license='../../LICENSE.txt',
28 url='https://github.com/kubeflow/kfserving/docs/sameples/transformer',
29 description='Transformer',
30 long_description=open('README.md').read(),
31 python_requires='>=3.6',
32 packages=find_packages("transformer"),
33 install_requires=[
34 "kfserving>=0.2.1",
35 "argparse>=1.4.0",
36 "requests>=2.22.0",
37 "joblib>=0.13.2",
38 "pandas>=0.24.2",
39 "numpy>=1.16.3",
40 "kubernetes >= 9.0.0",
41 "opencv-python-headless==4.0.0.21",
42 "boto3==1.7.2"
43 ],
44 tests_require=tests_require,
45 extras_require={'test': tests_require}
46 )
47
[end of docs/samples/kafka/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/samples/kafka/setup.py b/docs/samples/kafka/setup.py
--- a/docs/samples/kafka/setup.py
+++ b/docs/samples/kafka/setup.py
@@ -25,7 +25,7 @@
version='0.1.0',
author_email='[email protected]',
license='../../LICENSE.txt',
- url='https://github.com/kubeflow/kfserving/docs/sameples/transformer',
+ url='https://github.com/kubeflow/kfserving/tree/master/docs/samples#deploy-inferenceservice-with-transformer',
description='Transformer',
long_description=open('README.md').read(),
python_requires='>=3.6',
| {"golden_diff": "diff --git a/docs/samples/kafka/setup.py b/docs/samples/kafka/setup.py\n--- a/docs/samples/kafka/setup.py\n+++ b/docs/samples/kafka/setup.py\n@@ -25,7 +25,7 @@\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n- url='https://github.com/kubeflow/kfserving/docs/sameples/transformer',\n+ url='https://github.com/kubeflow/kfserving/tree/master/docs/samples#deploy-inferenceservice-with-transformer',\n description='Transformer',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n", "issue": "CommandException: No URLs matched: gs://kfserving-examples/models/mnist\n/kind bug \r\n\r\nI would like to run the kafka mnist example but when I run:\r\n```bash\r\ngsutil cp gs://kfserving-examples/models/mnist .\r\n```\r\nAs per the readme, I get\r\n```\r\nCommandException: No URLs matched: gs://kfserving-examples/models/mnist\r\n```\r\n\r\n**What did you expect to happen:**\r\nI expected to be able to download the model checkpoint. \r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='transformer',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/docs/sameples/transformer',\n description='Transformer',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"transformer\"),\n install_requires=[\n \"kfserving>=0.2.1\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"pandas>=0.24.2\",\n \"numpy>=1.16.3\",\n \"kubernetes >= 9.0.0\",\n \"opencv-python-headless==4.0.0.21\",\n \"boto3==1.7.2\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "docs/samples/kafka/setup.py"}]} | 1,122 | 159 |
gh_patches_debug_39370 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-8335 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use clean_address function to join multiple free text lines together
The `clean_address` method added in #7568 provides a standardised way to take messy, ordered, multi-line address strings (of any composition) and join them into a single string.
We can now use `clean_address` to replace the many ad-hoc variants throughout the spiders that attempt to join these multi-line address strings. An added benefit is being able to quickly find where multi-line address strings are parsed (by searching for `clean_address` instances), making it easier to change address handling in the future.
Related to #5598
</issue>
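Judging from the patch below, `clean_address` accepts either a single string or a list of possibly-empty lines. An illustrative stand-in showing the behaviour the spiders rely on (the real implementation lives in `locations.pipelines.address_clean_up` and may differ in detail):

```python
# Illustrative stand-in only, not the project's actual implementation.
def clean_address(value):
    lines = value if isinstance(value, (list, tuple)) else [value]
    parts = [line.strip() for line in lines if line and line.strip()]
    return ", ".join(parts)

print(clean_address(["12 High St", "", None, "Springfield"]))
# -> "12 High St, Springfield"
```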
<code>
[start of locations/spiders/zizzi_gb.py]
1 import scrapy
2
3 from locations.dict_parser import DictParser
4
5
6 class ZizziGBSpider(scrapy.Spider):
7 name = "zizzi_gb"
8 item_attributes = {"brand": "Zizzi", "brand_wikidata": "Q8072944"}
9 start_urls = ["https://www.zizzi.co.uk/wp-json/locations/get_venues"]
10
11 def parse(self, response):
12 for store in response.json()["data"]:
13 item = DictParser.parse(store)
14 item["addr_full"] = ", ".join(store["address"].split("\r\n"))
15 item["image"] = store["featured_image"]
16 item["website"] = store["link"]
17
18 if store["region"] == "Ireland":
19 item.pop("state")
20 item["country"] = "IE"
21 else:
22 item["country"] = "GB"
23
24 yield item
25
[end of locations/spiders/zizzi_gb.py]
[start of locations/spiders/zambrero_au.py]
1 import re
2
3 from scrapy import Spider
4 from scrapy.http import Request
5
6 from locations.categories import Categories
7 from locations.hours import OpeningHours
8 from locations.items import Feature
9
10
11 class ZambreroAUSpider(Spider):
12 name = "zambrero_au"
13 item_attributes = {"brand": "Zambrero", "brand_wikidata": "Q18636431", "extras": Categories.FAST_FOOD.value}
14 allowed_domains = ["www.zambrero.com.au"]
15
16 def start_requests(self):
17 yield Request(url=f"https://{self.allowed_domains[0]}/locations", callback=self.parse_location_list)
18
19 def parse_location_list(self, response):
20 location_urls = response.xpath('//div[@data-location-id]//a[@title="Order & Store Info"]/@href').getall()
21 for location_url in location_urls:
22 yield Request(url=location_url, callback=self.parse_location)
23
24 def parse_location(self, response):
25 properties = {
26 "ref": response.xpath("//@data-location-id").get(),
27 "name": re.sub(r"\s+", " ", response.xpath("//div[@data-location-id]/h4/text()").get()).strip(),
28 "lat": response.xpath("//@data-lat").get(),
29 "lon": response.xpath("///@data-lng").get(),
30 "addr_full": re.sub(
31 r"\s+",
32 " ",
33 " ".join(response.xpath('//div[@data-location-id]//span[contains(@class, "address")]/text()').getall()),
34 ).strip(),
35 "phone": response.xpath('//a[contains(@class, "phone")]/@href').get().replace("tel:", ""),
36 "email": response.xpath('//a[contains(@href, "mailto:")]/@href').get().replace("mailto:", ""),
37 "website": response.url,
38 "opening_hours": OpeningHours(),
39 }
40 if "Temporarily Closed" in properties["name"]:
41 return
42 if properties["phone"] == "0":
43 properties.pop("phone")
44
45 hours_text = re.sub(
46 r"\s+", " ", " ".join(response.xpath('//div[contains(@class, "hours-item")]/span/text()').getall())
47 )
48 properties["opening_hours"].add_ranges_from_string(hours_text)
49
50 # Some store names and URLs contain "Opening Soon" but numerous of
51 # these are already open and the URL hasn't been changed. A more
52 # reliable way of knowing a store is not yet open is that it has
53 # no opening hours specified.
54 if not properties["opening_hours"].as_opening_hours():
55 return
56
57 yield Feature(**properties)
58
[end of locations/spiders/zambrero_au.py]
[start of locations/spiders/woolworths_au.py]
1 import scrapy
2
3 from locations.dict_parser import DictParser
4
5
6 class WoolworthsAUSpider(scrapy.Spider):
7 name = "woolworths_au"
8 item_attributes = {"brand": "Woolworths", "brand_wikidata": "Q3249145"}
9 allowed_domains = ["woolworths.com.au"]
10 start_urls = [
11 "https://www.woolworths.com.au/apis/ui/StoreLocator/Stores?Max=10000&Division=SUPERMARKETS,PETROL,CALTEXWOW,AMPOLMETRO,AMPOL&Facility=&postcode=*"
12 ]
13 custom_settings = {"ROBOTSTXT_OBEY": False}
14 requires_proxy = "AU"
15
16 def parse(self, response):
17 data = response.json()
18
19 for i in data["Stores"]:
20 if not i["IsOpen"]:
21 continue
22
23 i["street_address"] = ", ".join(filter(None, [i["AddressLine1"], i["AddressLine2"]]))
24 i["ref"] = i.pop("StoreNo")
25 i["city"] = i.pop("Suburb")
26
27 item = DictParser.parse(i)
28
29 item["website"] = (
30 "https://www.woolworths.com.au/shop/storelocator/"
31 + "-".join([item["state"], item["city"], item["ref"], i["Division"]]).lower()
32 )
33
34 # TODO: types needs some work, NSI seems out of date too
35 item["extras"] = {"type": i["Division"]}
36
37 yield item
38
[end of locations/spiders/woolworths_au.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/woolworths_au.py b/locations/spiders/woolworths_au.py
--- a/locations/spiders/woolworths_au.py
+++ b/locations/spiders/woolworths_au.py
@@ -1,6 +1,7 @@
import scrapy
from locations.dict_parser import DictParser
+from locations.pipelines.address_clean_up import clean_address
class WoolworthsAUSpider(scrapy.Spider):
@@ -20,7 +21,7 @@
if not i["IsOpen"]:
continue
- i["street_address"] = ", ".join(filter(None, [i["AddressLine1"], i["AddressLine2"]]))
+ i["street_address"] = clean_address([i["AddressLine1"], i["AddressLine2"]])
i["ref"] = i.pop("StoreNo")
i["city"] = i.pop("Suburb")
diff --git a/locations/spiders/zambrero_au.py b/locations/spiders/zambrero_au.py
--- a/locations/spiders/zambrero_au.py
+++ b/locations/spiders/zambrero_au.py
@@ -6,6 +6,7 @@
from locations.categories import Categories
from locations.hours import OpeningHours
from locations.items import Feature
+from locations.pipelines.address_clean_up import clean_address
class ZambreroAUSpider(Spider):
@@ -27,11 +28,9 @@
"name": re.sub(r"\s+", " ", response.xpath("//div[@data-location-id]/h4/text()").get()).strip(),
"lat": response.xpath("//@data-lat").get(),
"lon": response.xpath("///@data-lng").get(),
- "addr_full": re.sub(
- r"\s+",
- " ",
- " ".join(response.xpath('//div[@data-location-id]//span[contains(@class, "address")]/text()').getall()),
- ).strip(),
+ "addr_full": clean_address(
+ " ".join(response.xpath('//div[@data-location-id]//span[contains(@class, "address")]/text()').getall())
+ ),
"phone": response.xpath('//a[contains(@class, "phone")]/@href').get().replace("tel:", ""),
"email": response.xpath('//a[contains(@href, "mailto:")]/@href').get().replace("mailto:", ""),
"website": response.url,
diff --git a/locations/spiders/zizzi_gb.py b/locations/spiders/zizzi_gb.py
--- a/locations/spiders/zizzi_gb.py
+++ b/locations/spiders/zizzi_gb.py
@@ -1,6 +1,7 @@
import scrapy
from locations.dict_parser import DictParser
+from locations.pipelines.address_clean_up import clean_address
class ZizziGBSpider(scrapy.Spider):
@@ -11,7 +12,7 @@
def parse(self, response):
for store in response.json()["data"]:
item = DictParser.parse(store)
- item["addr_full"] = ", ".join(store["address"].split("\r\n"))
+ item["addr_full"] = clean_address(store["address"].split("\r\n"))
item["image"] = store["featured_image"]
item["website"] = store["link"]
| {"golden_diff": "diff --git a/locations/spiders/woolworths_au.py b/locations/spiders/woolworths_au.py\n--- a/locations/spiders/woolworths_au.py\n+++ b/locations/spiders/woolworths_au.py\n@@ -1,6 +1,7 @@\n import scrapy\n \n from locations.dict_parser import DictParser\n+from locations.pipelines.address_clean_up import clean_address\n \n \n class WoolworthsAUSpider(scrapy.Spider):\n@@ -20,7 +21,7 @@\n if not i[\"IsOpen\"]:\n continue\n \n- i[\"street_address\"] = \", \".join(filter(None, [i[\"AddressLine1\"], i[\"AddressLine2\"]]))\n+ i[\"street_address\"] = clean_address([i[\"AddressLine1\"], i[\"AddressLine2\"]])\n i[\"ref\"] = i.pop(\"StoreNo\")\n i[\"city\"] = i.pop(\"Suburb\")\n \ndiff --git a/locations/spiders/zambrero_au.py b/locations/spiders/zambrero_au.py\n--- a/locations/spiders/zambrero_au.py\n+++ b/locations/spiders/zambrero_au.py\n@@ -6,6 +6,7 @@\n from locations.categories import Categories\n from locations.hours import OpeningHours\n from locations.items import Feature\n+from locations.pipelines.address_clean_up import clean_address\n \n \n class ZambreroAUSpider(Spider):\n@@ -27,11 +28,9 @@\n \"name\": re.sub(r\"\\s+\", \" \", response.xpath(\"//div[@data-location-id]/h4/text()\").get()).strip(),\n \"lat\": response.xpath(\"//@data-lat\").get(),\n \"lon\": response.xpath(\"///@data-lng\").get(),\n- \"addr_full\": re.sub(\n- r\"\\s+\",\n- \" \",\n- \" \".join(response.xpath('//div[@data-location-id]//span[contains(@class, \"address\")]/text()').getall()),\n- ).strip(),\n+ \"addr_full\": clean_address(\n+ \" \".join(response.xpath('//div[@data-location-id]//span[contains(@class, \"address\")]/text()').getall())\n+ ),\n \"phone\": response.xpath('//a[contains(@class, \"phone\")]/@href').get().replace(\"tel:\", \"\"),\n \"email\": response.xpath('//a[contains(@href, \"mailto:\")]/@href').get().replace(\"mailto:\", \"\"),\n \"website\": response.url,\ndiff --git a/locations/spiders/zizzi_gb.py b/locations/spiders/zizzi_gb.py\n--- a/locations/spiders/zizzi_gb.py\n+++ b/locations/spiders/zizzi_gb.py\n@@ -1,6 +1,7 @@\n import scrapy\n \n from locations.dict_parser import DictParser\n+from locations.pipelines.address_clean_up import clean_address\n \n \n class ZizziGBSpider(scrapy.Spider):\n@@ -11,7 +12,7 @@\n def parse(self, response):\n for store in response.json()[\"data\"]:\n item = DictParser.parse(store)\n- item[\"addr_full\"] = \", \".join(store[\"address\"].split(\"\\r\\n\"))\n+ item[\"addr_full\"] = clean_address(store[\"address\"].split(\"\\r\\n\"))\n item[\"image\"] = store[\"featured_image\"]\n item[\"website\"] = store[\"link\"]\n", "issue": "Use clean_address function to join multiple free text lines together\nThe `clean_address` method added in #7568 allows a standardised approach to taking messy ordered multiple line address strings (of any type of composition) and joining them together into a single string.\r\n\r\nWe can now use `clean_address` to replace the many variants throughout spiders of attempting to join these multi-line address strings. 
An added benefit is being able to quickly find where multi-line address strings are parsed (via searching for `clean_address` instances), making it easier to change address handling in the future.\r\n\r\nRelated to #5598\n", "before_files": [{"content": "import scrapy\n\nfrom locations.dict_parser import DictParser\n\n\nclass ZizziGBSpider(scrapy.Spider):\n name = \"zizzi_gb\"\n item_attributes = {\"brand\": \"Zizzi\", \"brand_wikidata\": \"Q8072944\"}\n start_urls = [\"https://www.zizzi.co.uk/wp-json/locations/get_venues\"]\n\n def parse(self, response):\n for store in response.json()[\"data\"]:\n item = DictParser.parse(store)\n item[\"addr_full\"] = \", \".join(store[\"address\"].split(\"\\r\\n\"))\n item[\"image\"] = store[\"featured_image\"]\n item[\"website\"] = store[\"link\"]\n\n if store[\"region\"] == \"Ireland\":\n item.pop(\"state\")\n item[\"country\"] = \"IE\"\n else:\n item[\"country\"] = \"GB\"\n\n yield item\n", "path": "locations/spiders/zizzi_gb.py"}, {"content": "import re\n\nfrom scrapy import Spider\nfrom scrapy.http import Request\n\nfrom locations.categories import Categories\nfrom locations.hours import OpeningHours\nfrom locations.items import Feature\n\n\nclass ZambreroAUSpider(Spider):\n name = \"zambrero_au\"\n item_attributes = {\"brand\": \"Zambrero\", \"brand_wikidata\": \"Q18636431\", \"extras\": Categories.FAST_FOOD.value}\n allowed_domains = [\"www.zambrero.com.au\"]\n\n def start_requests(self):\n yield Request(url=f\"https://{self.allowed_domains[0]}/locations\", callback=self.parse_location_list)\n\n def parse_location_list(self, response):\n location_urls = response.xpath('//div[@data-location-id]//a[@title=\"Order & Store Info\"]/@href').getall()\n for location_url in location_urls:\n yield Request(url=location_url, callback=self.parse_location)\n\n def parse_location(self, response):\n properties = {\n \"ref\": response.xpath(\"//@data-location-id\").get(),\n \"name\": re.sub(r\"\\s+\", \" \", response.xpath(\"//div[@data-location-id]/h4/text()\").get()).strip(),\n \"lat\": response.xpath(\"//@data-lat\").get(),\n \"lon\": response.xpath(\"///@data-lng\").get(),\n \"addr_full\": re.sub(\n r\"\\s+\",\n \" \",\n \" \".join(response.xpath('//div[@data-location-id]//span[contains(@class, \"address\")]/text()').getall()),\n ).strip(),\n \"phone\": response.xpath('//a[contains(@class, \"phone\")]/@href').get().replace(\"tel:\", \"\"),\n \"email\": response.xpath('//a[contains(@href, \"mailto:\")]/@href').get().replace(\"mailto:\", \"\"),\n \"website\": response.url,\n \"opening_hours\": OpeningHours(),\n }\n if \"Temporarily Closed\" in properties[\"name\"]:\n return\n if properties[\"phone\"] == \"0\":\n properties.pop(\"phone\")\n\n hours_text = re.sub(\n r\"\\s+\", \" \", \" \".join(response.xpath('//div[contains(@class, \"hours-item\")]/span/text()').getall())\n )\n properties[\"opening_hours\"].add_ranges_from_string(hours_text)\n\n # Some store names and URLs contain \"Opening Soon\" but numerous of\n # these are already open and the URL hasn't been changed. 
A more\n # reliable way of knowing a store is not yet open is that it has\n # no opening hours specified.\n if not properties[\"opening_hours\"].as_opening_hours():\n return\n\n yield Feature(**properties)\n", "path": "locations/spiders/zambrero_au.py"}, {"content": "import scrapy\n\nfrom locations.dict_parser import DictParser\n\n\nclass WoolworthsAUSpider(scrapy.Spider):\n name = \"woolworths_au\"\n item_attributes = {\"brand\": \"Woolworths\", \"brand_wikidata\": \"Q3249145\"}\n allowed_domains = [\"woolworths.com.au\"]\n start_urls = [\n \"https://www.woolworths.com.au/apis/ui/StoreLocator/Stores?Max=10000&Division=SUPERMARKETS,PETROL,CALTEXWOW,AMPOLMETRO,AMPOL&Facility=&postcode=*\"\n ]\n custom_settings = {\"ROBOTSTXT_OBEY\": False}\n requires_proxy = \"AU\"\n\n def parse(self, response):\n data = response.json()\n\n for i in data[\"Stores\"]:\n if not i[\"IsOpen\"]:\n continue\n\n i[\"street_address\"] = \", \".join(filter(None, [i[\"AddressLine1\"], i[\"AddressLine2\"]]))\n i[\"ref\"] = i.pop(\"StoreNo\")\n i[\"city\"] = i.pop(\"Suburb\")\n\n item = DictParser.parse(i)\n\n item[\"website\"] = (\n \"https://www.woolworths.com.au/shop/storelocator/\"\n + \"-\".join([item[\"state\"], item[\"city\"], item[\"ref\"], i[\"Division\"]]).lower()\n )\n\n # TODO: types needs some work, NSI seems out of date too\n item[\"extras\"] = {\"type\": i[\"Division\"]}\n\n yield item\n", "path": "locations/spiders/woolworths_au.py"}]} | 2,040 | 729 |
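As context for the record above: the `clean_address` implementation in `locations/pipelines/address_clean_up.py` is not included in this record, so the sketch below is an assumed minimal equivalent inferred from the patterns the golden diff replaces (`", ".join(filter(None, ...))` and whitespace-collapsing `re.sub` calls), not the project's actual code.

```python
# Hypothetical sketch of a clean_address helper like the one adopted in the
# golden diff above; the real implementation may differ.
def clean_address(address_lines):
    """Join messy, possibly multi-line address fragments into one string."""
    if isinstance(address_lines, str):
        address_lines = [address_lines]
    cleaned = []
    for line in address_lines:
        if line is None:
            continue  # mirrors filter(None, ...) in the replaced code
        line = " ".join(line.split())  # collapse runs of whitespace
        if line:
            cleaned.append(line)
    return ", ".join(cleaned)


print(clean_address(["12 Example St ", None, "  Suburbia  "]))
# -> "12 Example St, Suburbia"
```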
gh_patches_debug_20404 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Possible timezone related failure
### Describe the issue
https://github.com/ietf-tools/datatracker/actions/runs/4071644533/jobs/7013629899
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of ietf/group/factories.py]
1 # Copyright The IETF Trust 2015-2022, All Rights Reserved
2 import datetime
3 import debug # pyflakes:ignore
4 import factory
5
6 from typing import List # pyflakes:ignore
7
8 from django.utils import timezone
9
10 from ietf.group.models import Group, Role, GroupEvent, GroupMilestone, \
11 GroupHistory, RoleHistory
12 from ietf.review.factories import ReviewTeamSettingsFactory
13
14 class GroupFactory(factory.django.DjangoModelFactory):
15 class Meta:
16 model = Group
17 django_get_or_create = ('acronym',)
18
19 name = factory.Faker('sentence',nb_words=6)
20 acronym = factory.Sequence(lambda n: 'acronym%d' %n)
21 state_id = 'active'
22 type_id = 'wg'
23 list_email = factory.LazyAttribute(lambda a: '%[email protected]'% a.acronym)
24 uses_milestone_dates = True
25 used_roles = [] # type: List[str]
26
27 @factory.lazy_attribute
28 def parent(self):
29 if self.type_id in ['wg','ag']:
30 return GroupFactory(type_id='area')
31 elif self.type_id in ['rg','rag']:
32 return GroupFactory(acronym='irtf', type_id='irtf')
33 else:
34 return None
35
36 class ReviewTeamFactory(GroupFactory):
37
38 type_id = 'review'
39
40 @factory.post_generation
41 def settings(obj, create, extracted, **kwargs):
42 ReviewTeamSettingsFactory.create(group=obj,**kwargs)
43
44 class RoleFactory(factory.django.DjangoModelFactory):
45 class Meta:
46 model = Role
47
48 group = factory.SubFactory(GroupFactory)
49 person = factory.SubFactory('ietf.person.factories.PersonFactory')
50 email = factory.LazyAttribute(lambda obj: obj.person.email())
51
52 class GroupEventFactory(factory.django.DjangoModelFactory):
53 class Meta:
54 model = GroupEvent
55
56 group = factory.SubFactory(GroupFactory)
57 by = factory.SubFactory('ietf.person.factories.PersonFactory')
58 type = 'comment'
59 desc = factory.Faker('paragraph')
60
61 class BaseGroupMilestoneFactory(factory.django.DjangoModelFactory):
62 class Meta:
63 model = GroupMilestone
64
65 group = factory.SubFactory(GroupFactory)
66 state_id = 'active'
67 desc = factory.Faker('sentence')
68
69 class DatedGroupMilestoneFactory(BaseGroupMilestoneFactory):
70 group = factory.SubFactory(GroupFactory, uses_milestone_dates=True)
71 due = timezone.now()+datetime.timedelta(days=180)
72
73 class DatelessGroupMilestoneFactory(BaseGroupMilestoneFactory):
74 group = factory.SubFactory(GroupFactory, uses_milestone_dates=False)
75 order = factory.Sequence(lambda n: n)
76
77 class GroupHistoryFactory(factory.django.DjangoModelFactory):
78 class Meta:
79 model=GroupHistory
80
81 time = lambda: timezone.now()
82 group = factory.SubFactory(GroupFactory, state_id='active')
83
84 name = factory.LazyAttribute(lambda obj: obj.group.name)
85 state_id = factory.LazyAttribute(lambda obj: obj.group.state_id)
86 type_id = factory.LazyAttribute(lambda obj: obj.group.type_id)
87 parent = factory.LazyAttribute(lambda obj: obj.group.parent)
88 uses_milestone_dates = factory.LazyAttribute(lambda obj: obj.group.uses_milestone_dates)
89 used_roles = factory.LazyAttribute(lambda obj: obj.group.used_roles)
90 description = factory.LazyAttribute(lambda obj: obj.group.description)
91 list_email = factory.LazyAttribute(lambda obj: '%[email protected]'% obj.group.acronym) #TODO : move this to GroupFactory
92 list_subscribe = factory.LazyAttribute(lambda obj: obj.group.list_subscribe)
93 list_archive = factory.LazyAttribute(lambda obj: obj.group.list_archive)
94 comments = factory.LazyAttribute(lambda obj: obj.group.comments)
95 meeting_seen_as_area = factory.LazyAttribute(lambda obj: obj.group.meeting_seen_as_area)
96 acronym = factory.LazyAttribute(lambda obj: obj.group.acronym)
97
98 @factory.post_generation
99 def unused_states(obj, create, extracted, **kwargs):
100 if create:
101 if extracted:
102 obj.unused_states.set(extracted)
103 else:
104 obj.unused_states.set(obj.group.unused_states.all())
105 @factory.post_generation
106 def unused_tags(obj, create, extracted, **kwargs):
107 if create:
108 if extracted:
109 obj.unused_tags.set(extracted)
110 else:
111 obj.unused_tags.set(obj.group.unused_states.all())
112
113 class RoleHistoryFactory(factory.django.DjangoModelFactory):
114 class Meta:
115 model=RoleHistory
116
117 group = factory.SubFactory(GroupHistoryFactory)
118 person = factory.SubFactory('ietf.person.factories.PersonFactory')
119 email = factory.LazyAttribute(lambda obj: obj.person.email())
120
121
[end of ietf/group/factories.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/group/factories.py b/ietf/group/factories.py
--- a/ietf/group/factories.py
+++ b/ietf/group/factories.py
@@ -10,6 +10,8 @@
from ietf.group.models import Group, Role, GroupEvent, GroupMilestone, \
GroupHistory, RoleHistory
from ietf.review.factories import ReviewTeamSettingsFactory
+from ietf.utils.timezone import date_today
+
class GroupFactory(factory.django.DjangoModelFactory):
class Meta:
@@ -68,7 +70,7 @@
class DatedGroupMilestoneFactory(BaseGroupMilestoneFactory):
group = factory.SubFactory(GroupFactory, uses_milestone_dates=True)
- due = timezone.now()+datetime.timedelta(days=180)
+ due = date_today() + datetime.timedelta(days=180)
class DatelessGroupMilestoneFactory(BaseGroupMilestoneFactory):
group = factory.SubFactory(GroupFactory, uses_milestone_dates=False)
| {"golden_diff": "diff --git a/ietf/group/factories.py b/ietf/group/factories.py\n--- a/ietf/group/factories.py\n+++ b/ietf/group/factories.py\n@@ -10,6 +10,8 @@\n from ietf.group.models import Group, Role, GroupEvent, GroupMilestone, \\\n GroupHistory, RoleHistory\n from ietf.review.factories import ReviewTeamSettingsFactory\n+from ietf.utils.timezone import date_today\n+\n \n class GroupFactory(factory.django.DjangoModelFactory):\n class Meta:\n@@ -68,7 +70,7 @@\n \n class DatedGroupMilestoneFactory(BaseGroupMilestoneFactory):\n group = factory.SubFactory(GroupFactory, uses_milestone_dates=True)\n- due = timezone.now()+datetime.timedelta(days=180)\n+ due = date_today() + datetime.timedelta(days=180)\n \n class DatelessGroupMilestoneFactory(BaseGroupMilestoneFactory):\n group = factory.SubFactory(GroupFactory, uses_milestone_dates=False)\n", "issue": "Possible timezone related failure\n### Describe the issue\n\nhttps://github.com/ietf-tools/datatracker/actions/runs/4071644533/jobs/7013629899\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2015-2022, All Rights Reserved\nimport datetime\nimport debug # pyflakes:ignore\nimport factory\n\nfrom typing import List # pyflakes:ignore\n\nfrom django.utils import timezone\n\nfrom ietf.group.models import Group, Role, GroupEvent, GroupMilestone, \\\n GroupHistory, RoleHistory\nfrom ietf.review.factories import ReviewTeamSettingsFactory\n\nclass GroupFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Group\n django_get_or_create = ('acronym',)\n\n name = factory.Faker('sentence',nb_words=6)\n acronym = factory.Sequence(lambda n: 'acronym%d' %n)\n state_id = 'active'\n type_id = 'wg'\n list_email = factory.LazyAttribute(lambda a: '%[email protected]'% a.acronym)\n uses_milestone_dates = True\n used_roles = [] # type: List[str]\n\n @factory.lazy_attribute\n def parent(self):\n if self.type_id in ['wg','ag']:\n return GroupFactory(type_id='area')\n elif self.type_id in ['rg','rag']:\n return GroupFactory(acronym='irtf', type_id='irtf')\n else:\n return None\n\nclass ReviewTeamFactory(GroupFactory):\n\n type_id = 'review'\n\n @factory.post_generation\n def settings(obj, create, extracted, **kwargs):\n ReviewTeamSettingsFactory.create(group=obj,**kwargs)\n\nclass RoleFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Role\n\n group = factory.SubFactory(GroupFactory)\n person = factory.SubFactory('ietf.person.factories.PersonFactory')\n email = factory.LazyAttribute(lambda obj: obj.person.email())\n\nclass GroupEventFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = GroupEvent\n\n group = factory.SubFactory(GroupFactory)\n by = factory.SubFactory('ietf.person.factories.PersonFactory')\n type = 'comment'\n desc = factory.Faker('paragraph')\n\nclass BaseGroupMilestoneFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = GroupMilestone\n\n group = factory.SubFactory(GroupFactory)\n state_id = 'active'\n desc = factory.Faker('sentence')\n\nclass DatedGroupMilestoneFactory(BaseGroupMilestoneFactory):\n group = factory.SubFactory(GroupFactory, uses_milestone_dates=True)\n due = timezone.now()+datetime.timedelta(days=180)\n\nclass DatelessGroupMilestoneFactory(BaseGroupMilestoneFactory):\n group = factory.SubFactory(GroupFactory, uses_milestone_dates=False)\n order = factory.Sequence(lambda n: n)\n\nclass 
GroupHistoryFactory(factory.django.DjangoModelFactory):\n class Meta:\n model=GroupHistory\n\n time = lambda: timezone.now()\n group = factory.SubFactory(GroupFactory, state_id='active')\n\n name = factory.LazyAttribute(lambda obj: obj.group.name)\n state_id = factory.LazyAttribute(lambda obj: obj.group.state_id)\n type_id = factory.LazyAttribute(lambda obj: obj.group.type_id)\n parent = factory.LazyAttribute(lambda obj: obj.group.parent)\n uses_milestone_dates = factory.LazyAttribute(lambda obj: obj.group.uses_milestone_dates)\n used_roles = factory.LazyAttribute(lambda obj: obj.group.used_roles)\n description = factory.LazyAttribute(lambda obj: obj.group.description)\n list_email = factory.LazyAttribute(lambda obj: '%[email protected]'% obj.group.acronym) #TODO : move this to GroupFactory\n list_subscribe = factory.LazyAttribute(lambda obj: obj.group.list_subscribe)\n list_archive = factory.LazyAttribute(lambda obj: obj.group.list_archive)\n comments = factory.LazyAttribute(lambda obj: obj.group.comments)\n meeting_seen_as_area = factory.LazyAttribute(lambda obj: obj.group.meeting_seen_as_area)\n acronym = factory.LazyAttribute(lambda obj: obj.group.acronym)\n\n @factory.post_generation\n def unused_states(obj, create, extracted, **kwargs):\n if create:\n if extracted:\n obj.unused_states.set(extracted)\n else:\n obj.unused_states.set(obj.group.unused_states.all())\n @factory.post_generation\n def unused_tags(obj, create, extracted, **kwargs):\n if create:\n if extracted:\n obj.unused_tags.set(extracted)\n else:\n obj.unused_tags.set(obj.group.unused_states.all()) \n\nclass RoleHistoryFactory(factory.django.DjangoModelFactory):\n class Meta:\n model=RoleHistory\n\n group = factory.SubFactory(GroupHistoryFactory)\n person = factory.SubFactory('ietf.person.factories.PersonFactory')\n email = factory.LazyAttribute(lambda obj: obj.person.email())\n\n", "path": "ietf/group/factories.py"}]} | 1,902 | 217 |
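For context on the record above: the golden diff swaps `timezone.now() + timedelta` for `date_today() + timedelta` when populating a milestone due date. The exact body of `ietf.utils.timezone.date_today` is not shown in the record, so the sketch below is an assumed equivalent illustrating why a plain date beats an aware datetime here (comparisons against a `DateField` cannot shift by a day with the wall-clock timezone).

```python
# Assumed stand-in for ietf.utils.timezone.date_today; the real helper may
# differ in signature and default timezone.
import datetime
from zoneinfo import ZoneInfo


def date_today(tz: str = "UTC") -> datetime.date:
    """Return today's date in the given timezone as a naive datetime.date."""
    return datetime.datetime.now(tz=ZoneInfo(tz)).date()


# Stable regardless of when or where the test suite runs:
due = date_today() + datetime.timedelta(days=180)
print(due)
```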
gh_patches_debug_22473 | rasdani/github-patches | git_diff | pytorch__TensorRT-2363 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add documentation in user guide on `torch.compile` usage
- `torch.compile` "from-scratch" usage
</issue>
<code>
[start of py/torch_tensorrt/dynamo/_settings.py]
1 from dataclasses import dataclass, field
2 from typing import Optional, Set
3
4 import torch
5 from torch_tensorrt._Device import Device
6 from torch_tensorrt.dynamo._defaults import (
7 DEBUG,
8 ENABLE_EXPERIMENTAL_DECOMPOSITIONS,
9 MAX_AUX_STREAMS,
10 MIN_BLOCK_SIZE,
11 OPTIMIZATION_LEVEL,
12 PASS_THROUGH_BUILD_FAILURES,
13 PRECISION,
14 REQUIRE_FULL_COMPILATION,
15 TRUNCATE_LONG_AND_DOUBLE,
16 USE_FAST_PARTITIONER,
17 USE_PYTHON_RUNTIME,
18 VERSION_COMPATIBLE,
19 WORKSPACE_SIZE,
20 default_device,
21 )
22
23
24 @dataclass
25 class CompilationSettings:
26 """Compilation settings for Torch-TensorRT Dynamo Paths
27
28 Args:
29 precision (torch.dtype): Model Layer precision
30 debug (bool): Whether to print out verbose debugging information
31 workspace_size (int): Workspace TRT is allowed to use for the module (0 is default)
32 min_block_size (int): Minimum number of operators per TRT-Engine Block
33 torch_executed_ops (Sequence[str]): Sequence of operations to run in Torch, regardless of converter coverage
34 pass_through_build_failures (bool): Whether to fail on TRT engine build errors (True) or not (False)
35 max_aux_streams (Optional[int]): Maximum number of allowed auxiliary TRT streams for each engine
36 version_compatible (bool): Provide version forward-compatibility for engine plan files
37 optimization_level (Optional[int]): Builder optimization 0-5, higher levels imply longer build time,
38 searching for more optimization options. TRT defaults to 3
39 use_python_runtime (Optional[bool]): Whether to strictly use Python runtime or C++ runtime. To auto-select a runtime
40 based on C++ dependency presence (preferentially choosing C++ runtime if available), leave the
41 argument as None
42 truncate_long_and_double (bool): Truncate int64/float64 TRT engine inputs or weights to int32/float32
43 enable_experimental_decompositions (bool): Whether to enable all core aten decompositions
44 or only a selected subset of them
45 """
46
47 precision: torch.dtype = PRECISION
48 debug: bool = DEBUG
49 workspace_size: int = WORKSPACE_SIZE
50 min_block_size: int = MIN_BLOCK_SIZE
51 torch_executed_ops: Set[str] = field(default_factory=set)
52 pass_through_build_failures: bool = PASS_THROUGH_BUILD_FAILURES
53 max_aux_streams: Optional[int] = MAX_AUX_STREAMS
54 version_compatible: bool = VERSION_COMPATIBLE
55 optimization_level: Optional[int] = OPTIMIZATION_LEVEL
56 use_python_runtime: Optional[bool] = USE_PYTHON_RUNTIME
57 truncate_long_and_double: bool = TRUNCATE_LONG_AND_DOUBLE
58 use_fast_partitioner: bool = USE_FAST_PARTITIONER
59 enable_experimental_decompositions: bool = ENABLE_EXPERIMENTAL_DECOMPOSITIONS
60 device: Device = field(default_factory=default_device)
61 require_full_compilation: bool = REQUIRE_FULL_COMPILATION
62
[end of py/torch_tensorrt/dynamo/_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/torch_tensorrt/dynamo/_settings.py b/py/torch_tensorrt/dynamo/_settings.py
--- a/py/torch_tensorrt/dynamo/_settings.py
+++ b/py/torch_tensorrt/dynamo/_settings.py
@@ -39,9 +39,13 @@
use_python_runtime (Optional[bool]): Whether to strictly use Python runtime or C++ runtime. To auto-select a runtime
based on C++ dependency presence (preferentially choosing C++ runtime if available), leave the
argument as None
- truncate_long_and_double (bool): Truncate int64/float64 TRT engine inputs or weights to int32/float32
+ truncate_long_and_double (bool): Whether to truncate int64/float64 TRT engine inputs or weights to int32/float32
+ use_fast_partitioner (bool): Whether to use the fast or global graph partitioning system
enable_experimental_decompositions (bool): Whether to enable all core aten decompositions
or only a selected subset of them
+ device (Device): GPU to compile the model on
+ require_full_compilation (bool): Whether to require the graph is fully compiled in TensorRT.
+ Only applicable for `ir="dynamo"`; has no effect for `torch.compile` path
"""
precision: torch.dtype = PRECISION
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/_settings.py b/py/torch_tensorrt/dynamo/_settings.py\n--- a/py/torch_tensorrt/dynamo/_settings.py\n+++ b/py/torch_tensorrt/dynamo/_settings.py\n@@ -39,9 +39,13 @@\n use_python_runtime (Optional[bool]): Whether to strictly use Python runtime or C++ runtime. To auto-select a runtime\n based on C++ dependency presence (preferentially choosing C++ runtime if available), leave the\n argument as None\n- truncate_long_and_double (bool): Truncate int64/float64 TRT engine inputs or weights to int32/float32\n+ truncate_long_and_double (bool): Whether to truncate int64/float64 TRT engine inputs or weights to int32/float32\n+ use_fast_partitioner (bool): Whether to use the fast or global graph partitioning system\n enable_experimental_decompositions (bool): Whether to enable all core aten decompositions\n or only a selected subset of them\n+ device (Device): GPU to compile the model on\n+ require_full_compilation (bool): Whether to require the graph is fully compiled in TensorRT.\n+ Only applicable for `ir=\"dynamo\"`; has no effect for `torch.compile` path\n \"\"\"\n \n precision: torch.dtype = PRECISION\n", "issue": "Add documentation in user guide on `torch.compile` usage\n- `torch.compile` \"from-scratch\" usage\n", "before_files": [{"content": "from dataclasses import dataclass, field\nfrom typing import Optional, Set\n\nimport torch\nfrom torch_tensorrt._Device import Device\nfrom torch_tensorrt.dynamo._defaults import (\n DEBUG,\n ENABLE_EXPERIMENTAL_DECOMPOSITIONS,\n MAX_AUX_STREAMS,\n MIN_BLOCK_SIZE,\n OPTIMIZATION_LEVEL,\n PASS_THROUGH_BUILD_FAILURES,\n PRECISION,\n REQUIRE_FULL_COMPILATION,\n TRUNCATE_LONG_AND_DOUBLE,\n USE_FAST_PARTITIONER,\n USE_PYTHON_RUNTIME,\n VERSION_COMPATIBLE,\n WORKSPACE_SIZE,\n default_device,\n)\n\n\n@dataclass\nclass CompilationSettings:\n \"\"\"Compilation settings for Torch-TensorRT Dynamo Paths\n\n Args:\n precision (torch.dtype): Model Layer precision\n debug (bool): Whether to print out verbose debugging information\n workspace_size (int): Workspace TRT is allowed to use for the module (0 is default)\n min_block_size (int): Minimum number of operators per TRT-Engine Block\n torch_executed_ops (Sequence[str]): Sequence of operations to run in Torch, regardless of converter coverage\n pass_through_build_failures (bool): Whether to fail on TRT engine build errors (True) or not (False)\n max_aux_streams (Optional[int]): Maximum number of allowed auxiliary TRT streams for each engine\n version_compatible (bool): Provide version forward-compatibility for engine plan files\n optimization_level (Optional[int]): Builder optimization 0-5, higher levels imply longer build time,\n searching for more optimization options. TRT defaults to 3\n use_python_runtime (Optional[bool]): Whether to strictly use Python runtime or C++ runtime. 
To auto-select a runtime\n based on C++ dependency presence (preferentially choosing C++ runtime if available), leave the\n argument as None\n truncate_long_and_double (bool): Truncate int64/float64 TRT engine inputs or weights to int32/float32\n enable_experimental_decompositions (bool): Whether to enable all core aten decompositions\n or only a selected subset of them\n \"\"\"\n\n precision: torch.dtype = PRECISION\n debug: bool = DEBUG\n workspace_size: int = WORKSPACE_SIZE\n min_block_size: int = MIN_BLOCK_SIZE\n torch_executed_ops: Set[str] = field(default_factory=set)\n pass_through_build_failures: bool = PASS_THROUGH_BUILD_FAILURES\n max_aux_streams: Optional[int] = MAX_AUX_STREAMS\n version_compatible: bool = VERSION_COMPATIBLE\n optimization_level: Optional[int] = OPTIMIZATION_LEVEL\n use_python_runtime: Optional[bool] = USE_PYTHON_RUNTIME\n truncate_long_and_double: bool = TRUNCATE_LONG_AND_DOUBLE\n use_fast_partitioner: bool = USE_FAST_PARTITIONER\n enable_experimental_decompositions: bool = ENABLE_EXPERIMENTAL_DECOMPOSITIONS\n device: Device = field(default_factory=default_device)\n require_full_compilation: bool = REQUIRE_FULL_COMPILATION\n", "path": "py/torch_tensorrt/dynamo/_settings.py"}]} | 1,326 | 302 |
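Note that the golden diff in the record above is documentation-only; runtime behavior is unchanged. For readers skimming the record, a minimal usage sketch of the dataclass it documents follows — the specific field values are illustrative assumptions, not recommendations from the record.

```python
# Sketch: constructing the CompilationSettings dataclass from the record above.
import torch
from torch_tensorrt.dynamo._settings import CompilationSettings

settings = CompilationSettings(
    precision=torch.float16,
    min_block_size=5,
    truncate_long_and_double=True,
    # Per the new docstring, only meaningful for ir="dynamo",
    # and a no-op on the torch.compile path:
    require_full_compilation=False,
)
print(settings.use_fast_partitioner, settings.device)
```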
gh_patches_debug_59440 | rasdani/github-patches | git_diff | Pycord-Development__pycord-576 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SlashCommand Groups Issues
This issue is to keep track of the issues since we reworked groups.
</issue>
<code>
[start of examples/app_commands/slash_groups.py]
1 import discord
2
3 bot = discord.Bot()
4
5 # If you use commands.Bot, @bot.slash_command should be used for
6 # slash commands. You can use @bot.slash_command with discord.Bot as well
7
8 math = bot.command_group(
9 "math", "Commands related to mathematics."
10 ) # create a slash command group
11
12
13 @math.command(guild_ids=[...]) # create a slash command
14 async def add(ctx, num1: int, num2: int):
15 """Get the sum of 2 integers."""
16 await ctx.respond(f"The sum of these numbers is **{num1+num2}**")
17
18
19 # another way, creating the class manually
20
21 from discord.commands import SlashCommandGroup
22
23 math = SlashCommandGroup("math", "Commands related to mathematics.")
24
25
26 @math.command(guild_ids=[...])
27 async def add(ctx, num1: int, num2: int):
28 ...
29
30
31 bot.add_application_command(math)
32
33 bot.run("TOKEN")
34
[end of examples/app_commands/slash_groups.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/app_commands/slash_groups.py b/examples/app_commands/slash_groups.py
--- a/examples/app_commands/slash_groups.py
+++ b/examples/app_commands/slash_groups.py
@@ -5,7 +5,7 @@
# If you use commands.Bot, @bot.slash_command should be used for
# slash commands. You can use @bot.slash_command with discord.Bot as well
-math = bot.command_group(
+math = bot.create_group(
"math", "Commands related to mathematics."
) # create a slash command group
| {"golden_diff": "diff --git a/examples/app_commands/slash_groups.py b/examples/app_commands/slash_groups.py\n--- a/examples/app_commands/slash_groups.py\n+++ b/examples/app_commands/slash_groups.py\n@@ -5,7 +5,7 @@\n # If you use commands.Bot, @bot.slash_command should be used for\r\n # slash commands. You can use @bot.slash_command with discord.Bot as well\r\n \r\n-math = bot.command_group(\r\n+math = bot.create_group(\r\n \"math\", \"Commands related to mathematics.\"\r\n ) # create a slash command group\n", "issue": "SlashCommand Groups Issues\nThis issue is to keep track of the issues since we reworked groups.\n", "before_files": [{"content": "import discord\r\n\r\nbot = discord.Bot()\r\n\r\n# If you use commands.Bot, @bot.slash_command should be used for\r\n# slash commands. You can use @bot.slash_command with discord.Bot as well\r\n\r\nmath = bot.command_group(\r\n \"math\", \"Commands related to mathematics.\"\r\n) # create a slash command group\r\n\r\n\r\[email protected](guild_ids=[...]) # create a slash command\r\nasync def add(ctx, num1: int, num2: int):\r\n \"\"\"Get the sum of 2 integers.\"\"\"\r\n await ctx.respond(f\"The sum of these numbers is **{num1+num2}**\")\r\n\r\n\r\n# another way, creating the class manually\r\n\r\nfrom discord.commands import SlashCommandGroup\r\n\r\nmath = SlashCommandGroup(\"math\", \"Commands related to mathematics.\")\r\n\r\n\r\[email protected](guild_ids=[...])\r\nasync def add(ctx, num1: int, num2: int):\r\n ...\r\n\r\n\r\nbot.add_application_command(math)\r\n\r\nbot.run(\"TOKEN\")\r\n", "path": "examples/app_commands/slash_groups.py"}]} | 830 | 120 |
gh_patches_debug_67111 | rasdani/github-patches | git_diff | beeware__toga-1619 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scroll container not expanding to width of container
```
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class AFV(toga.App):
def startup(self):
self.main_window = toga.MainWindow(title=self.formal_name)
box_test = toga.Box(style=Pack(direction=COLUMN, padding=5))
self.label_1 = toga.Label('TESTE 1')
self.lineEdit_1 = toga.TextInput()
self.label_2 = toga.Label('TESTE 2')
self.lineEdit_2 = toga.TextInput()
self.label_3 = toga.Label('TESTE 3')
self.lineEdit_3 = toga.TextInput()
self.label_4 = toga.Label('TESTE 4')
self.lineEdit_4 = toga.TextInput()
self.label_5 = toga.Label('TESTE 5')
self.lineEdit_5 = toga.TextInput()
box_test.add(self.label_1, self.lineEdit_1,
self.label_2, self.lineEdit_2,
self.label_3, self.lineEdit_3,
self.label_4, self.lineEdit_4,
self.label_5, self.lineEdit_5)
self.container = toga.ScrollContainer(horizontal=True, vertical=True)
self.container.content = box_test
self.main_window.content = self.container
self.main_window.show()
def main():
return AFV()
```
When using the widget it leaves the widgets in the wrong shape and size on the screen.
The ScrollContainer doesn't make the TextInput widget fill to the bottom of the screen, it measures according to the size of the Label text.

Worked on Briefcase 0.3.9; new screenshot is from Briefcase 0.3.10.
</issue>
<code>
[start of src/android/toga_android/widgets/scrollcontainer.py]
1 from travertino.size import at_least
2
3 from toga_android.window import AndroidViewport
4
5 from ..libs.android.view import (
6 Gravity,
7 View__MeasureSpec,
8 View__OnTouchListener
9 )
10 from ..libs.android.widget import (
11 HorizontalScrollView,
12 LinearLayout__LayoutParams,
13 ScrollView
14 )
15 from .base import Widget
16
17
18 class TogaOnTouchListener(View__OnTouchListener):
19 is_scrolling_enabled = True
20
21 def __init__(self):
22 super().__init__()
23
24 def onTouch(self, view, motion_event):
25 if self.is_scrolling_enabled:
26 return view.onTouchEvent(motion_event)
27 else:
28 return True
29
30
31 class ScrollContainer(Widget):
32 vScrollListener = None
33 hScrollView = None
34 hScrollListener = None
35
36 def create(self):
37 vScrollView = ScrollView(self._native_activity)
38 vScrollView_layout_params = LinearLayout__LayoutParams(
39 LinearLayout__LayoutParams.MATCH_PARENT,
40 LinearLayout__LayoutParams.MATCH_PARENT
41 )
42 vScrollView_layout_params.gravity = Gravity.TOP
43 vScrollView.setLayoutParams(vScrollView_layout_params)
44 self.vScrollListener = TogaOnTouchListener()
45 self.vScrollListener.is_scrolling_enabled = self.interface.vertical
46 vScrollView.setOnTouchListener(self.vScrollListener)
47 self.native = vScrollView
48 self.hScrollView = HorizontalScrollView(self._native_activity)
49 hScrollView_layout_params = LinearLayout__LayoutParams(
50 LinearLayout__LayoutParams.MATCH_PARENT,
51 LinearLayout__LayoutParams.MATCH_PARENT
52 )
53 hScrollView_layout_params.gravity = Gravity.LEFT
54 self.hScrollListener = TogaOnTouchListener()
55 self.hScrollListener.is_scrolling_enabled = self.interface.horizontal
56 self.hScrollView.setOnTouchListener(self.hScrollListener)
57 vScrollView.addView(self.hScrollView, hScrollView_layout_params)
58 if self.interface.content is not None:
59 self.set_content(self.interface.content)
60
61 def set_content(self, widget):
62 widget.viewport = AndroidViewport(widget.native)
63 content_view_params = LinearLayout__LayoutParams(
64 LinearLayout__LayoutParams.MATCH_PARENT,
65 LinearLayout__LayoutParams.MATCH_PARENT
66 )
67 if widget.container:
68 widget.container = None
69 if self.interface.content:
70 self.hScrollView.removeAllViews()
71 self.hScrollView.addView(widget.native, content_view_params)
72 for child in widget.interface.children:
73 if child._impl.container:
74 child._impl.container = None
75 child._impl.container = widget
76
77 def set_vertical(self, value):
78 self.vScrollListener.is_scrolling_enabled = value
79
80 def set_horizontal(self, value):
81 self.hScrollListener.is_scrolling_enabled = value
82
83 def set_on_scroll(self, on_scroll):
84 self.interface.factory.not_implemented("ScrollContainer.set_on_scroll()")
85
86 def get_vertical_position(self):
87 self.interface.factory.not_implemented(
88 "ScrollContainer.get_vertical_position()"
89 )
90 return 0
91
92 def set_vertical_position(self, vertical_position):
93 self.interface.factory.not_implemented(
94 "ScrollContainer.set_vertical_position()"
95 )
96
97 def get_horizontal_position(self):
98 self.interface.factory.not_implemented(
99 "ScrollContainer.get_horizontal_position()"
100 )
101 return 0
102
103 def set_horizontal_position(self, horizontal_position):
104 self.interface.factory.not_implemented(
105 "ScrollContainer.set_horizontal_position()"
106 )
107
108 def rehint(self):
109 # Android can crash when rendering some widgets until they have their layout params set. Guard for that case.
110 if not self.native.getLayoutParams():
111 return
112 self.native.measure(
113 View__MeasureSpec.UNSPECIFIED,
114 View__MeasureSpec.UNSPECIFIED,
115 )
116 self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())
117 self.interface.intrinsic.height = at_least(self.native.getMeasuredHeight())
118
[end of src/android/toga_android/widgets/scrollcontainer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/android/toga_android/widgets/scrollcontainer.py b/src/android/toga_android/widgets/scrollcontainer.py
--- a/src/android/toga_android/widgets/scrollcontainer.py
+++ b/src/android/toga_android/widgets/scrollcontainer.py
@@ -59,7 +59,7 @@
self.set_content(self.interface.content)
def set_content(self, widget):
- widget.viewport = AndroidViewport(widget.native)
+ widget.viewport = AndroidViewport(self.native)
content_view_params = LinearLayout__LayoutParams(
LinearLayout__LayoutParams.MATCH_PARENT,
LinearLayout__LayoutParams.MATCH_PARENT
| {"golden_diff": "diff --git a/src/android/toga_android/widgets/scrollcontainer.py b/src/android/toga_android/widgets/scrollcontainer.py\n--- a/src/android/toga_android/widgets/scrollcontainer.py\n+++ b/src/android/toga_android/widgets/scrollcontainer.py\n@@ -59,7 +59,7 @@\n self.set_content(self.interface.content)\n \n def set_content(self, widget):\n- widget.viewport = AndroidViewport(widget.native)\n+ widget.viewport = AndroidViewport(self.native)\n content_view_params = LinearLayout__LayoutParams(\n LinearLayout__LayoutParams.MATCH_PARENT,\n LinearLayout__LayoutParams.MATCH_PARENT\n", "issue": "Scroll container not expanding to width of container\n```\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\nclass AFV(toga.App):\r\n\r\n def startup(self):\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n\r\n box_test = toga.Box(style=Pack(direction=COLUMN, padding=5))\r\n self.label_1 = toga.Label('TESTE 1')\r\n self.lineEdit_1 = toga.TextInput()\r\n self.label_2 = toga.Label('TESTE 2')\r\n self.lineEdit_2 = toga.TextInput()\r\n self.label_3 = toga.Label('TESTE 3')\r\n self.lineEdit_3 = toga.TextInput()\r\n self.label_4 = toga.Label('TESTE 4')\r\n self.lineEdit_4 = toga.TextInput()\r\n self.label_5 = toga.Label('TESTE 5')\r\n self.lineEdit_5 = toga.TextInput()\r\n\r\n box_test.add(self.label_1, self.lineEdit_1, \r\n self.label_2, self.lineEdit_2, \r\n self.label_3, self.lineEdit_3, \r\n self.label_4, self.lineEdit_4, \r\n self.label_5, self.lineEdit_5)\r\n self.container = toga.ScrollContainer(horizontal=True, vertical=True)\r\n self.container.content = box_test\r\n\r\n\r\n self.main_window.content = self.container\r\n self.main_window.show()\r\n\r\n\r\ndef main():\r\n return AFV()\r\n```\r\n\r\n\r\nWhen using the widget it leaves the widgets in the wrong shape and size on the screen.\r\nThe ScrollContainer doesn't make the TextInput widget fill to the bottom of the screen, it measures according to the size of the Label text.\r\n\r\n\r\n\r\nWorked on Briefcase 0.3.9; new screenshot is from Briefcase 0.3.10.\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga_android.window import AndroidViewport\n\nfrom ..libs.android.view import (\n Gravity,\n View__MeasureSpec,\n View__OnTouchListener\n)\nfrom ..libs.android.widget import (\n HorizontalScrollView,\n LinearLayout__LayoutParams,\n ScrollView\n)\nfrom .base import Widget\n\n\nclass TogaOnTouchListener(View__OnTouchListener):\n is_scrolling_enabled = True\n\n def __init__(self):\n super().__init__()\n\n def onTouch(self, view, motion_event):\n if self.is_scrolling_enabled:\n return view.onTouchEvent(motion_event)\n else:\n return True\n\n\nclass ScrollContainer(Widget):\n vScrollListener = None\n hScrollView = None\n hScrollListener = None\n\n def create(self):\n vScrollView = ScrollView(self._native_activity)\n vScrollView_layout_params = LinearLayout__LayoutParams(\n LinearLayout__LayoutParams.MATCH_PARENT,\n LinearLayout__LayoutParams.MATCH_PARENT\n )\n vScrollView_layout_params.gravity = Gravity.TOP\n vScrollView.setLayoutParams(vScrollView_layout_params)\n self.vScrollListener = TogaOnTouchListener()\n self.vScrollListener.is_scrolling_enabled = self.interface.vertical\n vScrollView.setOnTouchListener(self.vScrollListener)\n self.native = vScrollView\n self.hScrollView = HorizontalScrollView(self._native_activity)\n hScrollView_layout_params = LinearLayout__LayoutParams(\n LinearLayout__LayoutParams.MATCH_PARENT,\n 
LinearLayout__LayoutParams.MATCH_PARENT\n )\n hScrollView_layout_params.gravity = Gravity.LEFT\n self.hScrollListener = TogaOnTouchListener()\n self.hScrollListener.is_scrolling_enabled = self.interface.horizontal\n self.hScrollView.setOnTouchListener(self.hScrollListener)\n vScrollView.addView(self.hScrollView, hScrollView_layout_params)\n if self.interface.content is not None:\n self.set_content(self.interface.content)\n\n def set_content(self, widget):\n widget.viewport = AndroidViewport(widget.native)\n content_view_params = LinearLayout__LayoutParams(\n LinearLayout__LayoutParams.MATCH_PARENT,\n LinearLayout__LayoutParams.MATCH_PARENT\n )\n if widget.container:\n widget.container = None\n if self.interface.content:\n self.hScrollView.removeAllViews()\n self.hScrollView.addView(widget.native, content_view_params)\n for child in widget.interface.children:\n if child._impl.container:\n child._impl.container = None\n child._impl.container = widget\n\n def set_vertical(self, value):\n self.vScrollListener.is_scrolling_enabled = value\n\n def set_horizontal(self, value):\n self.hScrollListener.is_scrolling_enabled = value\n\n def set_on_scroll(self, on_scroll):\n self.interface.factory.not_implemented(\"ScrollContainer.set_on_scroll()\")\n\n def get_vertical_position(self):\n self.interface.factory.not_implemented(\n \"ScrollContainer.get_vertical_position()\"\n )\n return 0\n\n def set_vertical_position(self, vertical_position):\n self.interface.factory.not_implemented(\n \"ScrollContainer.set_vertical_position()\"\n )\n\n def get_horizontal_position(self):\n self.interface.factory.not_implemented(\n \"ScrollContainer.get_horizontal_position()\"\n )\n return 0\n\n def set_horizontal_position(self, horizontal_position):\n self.interface.factory.not_implemented(\n \"ScrollContainer.set_horizontal_position()\"\n )\n\n def rehint(self):\n # Android can crash when rendering some widgets until they have their layout params set. Guard for that case.\n if not self.native.getLayoutParams():\n return\n self.native.measure(\n View__MeasureSpec.UNSPECIFIED,\n View__MeasureSpec.UNSPECIFIED,\n )\n self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())\n self.interface.intrinsic.height = at_least(self.native.getMeasuredHeight())\n", "path": "src/android/toga_android/widgets/scrollcontainer.py"}]} | 2,012 | 125 |
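The one-line fix in the record above points the viewport at the scroll container's own native view (`self.native`) instead of the content's (`widget.native`), so child widgets lay out against the container's full size. The toy below illustrates that difference in plain Python; the `Viewport` class here is a simplified stand-in, not Toga's actual `AndroidViewport` API.

```python
# Toy illustration of the viewport-sizing bug fixed by the golden diff above.
class Viewport:
    """Stand-in: exposes the width of whichever native view it wraps."""

    def __init__(self, view):
        self.view = view

    @property
    def width(self):
        return self.view["width"]


container_view = {"width": 1080}  # the scroll container fills the screen
content_view = {"width": 320}     # the content box is only as wide as its labels

broken = Viewport(content_view)    # widgets measure against 320 (pre-fix)
fixed = Viewport(container_view)   # widgets can expand to 1080 (post-fix)
print(broken.width, fixed.width)   # 320 1080
```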
gh_patches_debug_17407 | rasdani/github-patches | git_diff | pypi__warehouse-1485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refuse account creation when using disposable email addresses.
long term it makes no sense to accept accounts which use an email address which is disposable for managing legit packages. short/near term it opens an easy door for spammers to create accounts on PyPI.
i've implemented blacklisting for account signup and email swaps which use the blacklist at https://github.com/martenson/disposable-email-domains for legacy pypi.
</issue>
<code>
[start of warehouse/accounts/forms.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12 import re
13
14 import wtforms
15 import wtforms.fields.html5
16
17 from warehouse import forms, recaptcha
18
19
20 class CredentialsMixin:
21 username = wtforms.StringField(
22 validators=[
23 wtforms.validators.DataRequired(),
24 wtforms.validators.Length(max=50),
25 ],
26 )
27
28 password = wtforms.PasswordField(
29 validators=[
30 wtforms.validators.DataRequired(),
31 ],
32 )
33
34 def __init__(self, *args, user_service, **kwargs):
35 super().__init__(*args, **kwargs)
36 self.user_service = user_service
37
38
39 # XXX: This is a naive password strength validator, but something that can
40 # easily be replicated in JS for client-side feedback.
41 # see: https://github.com/pypa/warehouse/issues/6
42 PWD_MIN_LEN = 8
43 PWD_RE = re.compile(r"""
44 ^ # start
45 (?=.*[A-Z]+.*) # >= 1 upper case
46 (?=.*[a-z]+.*) # >= 1 lower case
47 (?=.*[0-9]+.*) # >= 1 number
48 (?=.*[.*~`\!@#$%^&\*\(\)_+-={}|\[\]\\:";'<>?,\./]+.*) # >= 1 special char
49 .{""" + str(PWD_MIN_LEN) + """,} # >= 8 chars
50 $ # end
51 """, re.X)
52
53
54 class RegistrationForm(CredentialsMixin, forms.Form):
55 password_confirm = wtforms.PasswordField(
56 validators=[
57 wtforms.validators.DataRequired(),
58 wtforms.validators.EqualTo(
59 "password", "Passwords must match."
60 ),
61 ],
62 )
63
64 full_name = wtforms.StringField()
65
66 email = wtforms.fields.html5.EmailField(
67 validators=[
68 wtforms.validators.DataRequired(),
69 wtforms.validators.Email(),
70 ],
71 )
72
73 g_recaptcha_response = wtforms.StringField()
74
75 def __init__(self, *args, recaptcha_service, **kwargs):
76 super().__init__(*args, **kwargs)
77 self.recaptcha_service = recaptcha_service
78
79 def validate_username(self, field):
80 if self.user_service.find_userid(field.data) is not None:
81 raise wtforms.validators.ValidationError(
82 "Username exists.")
83
84 def validate_email(self, field):
85 if self.user_service.find_userid_by_email(field.data) is not None:
86 raise wtforms.validators.ValidationError("Email exists.")
87
88 def validate_g_recaptcha_response(self, field):
89 # do required data validation here due to enabled flag being required
90 if self.recaptcha_service.enabled and not field.data:
91 raise wtforms.validators.ValidationError("Recaptcha error.")
92 try:
93 self.recaptcha_service.verify_response(field.data)
94 except recaptcha.RecaptchaError:
95 # TODO: log error
96 # don't want to provide the user with any detail
97 raise wtforms.validators.ValidationError("Recaptcha error.")
98
99 def validate_password(self, field):
100 if not PWD_RE.match(field.data):
101 raise wtforms.validators.ValidationError(
102 "Password must contain an upper case letter, a lower case "
103 "letter, a number, a special character and be at least "
104 "%d characters in length" % PWD_MIN_LEN
105 )
106
107
108 class LoginForm(CredentialsMixin, forms.Form):
109 def validate_username(self, field):
110 userid = self.user_service.find_userid(field.data)
111
112 if userid is None:
113 raise wtforms.validators.ValidationError("Invalid user.")
114
115 def validate_password(self, field):
116 userid = self.user_service.find_userid(self.username.data)
117 if userid is not None:
118 if not self.user_service.check_password(userid, field.data):
119 raise wtforms.validators.ValidationError("Invalid password.")
120
[end of warehouse/accounts/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/accounts/forms.py b/warehouse/accounts/forms.py
--- a/warehouse/accounts/forms.py
+++ b/warehouse/accounts/forms.py
@@ -11,6 +11,7 @@
# limitations under the License.
import re
+import disposable_email_domains
import wtforms
import wtforms.fields.html5
@@ -84,6 +85,9 @@
def validate_email(self, field):
if self.user_service.find_userid_by_email(field.data) is not None:
raise wtforms.validators.ValidationError("Email exists.")
+ domain = field.data.split('@')[-1]
+ if domain in disposable_email_domains.blacklist:
+ raise wtforms.validators.ValidationError("Disposable email.")
def validate_g_recaptcha_response(self, field):
# do required data validation here due to enabled flag being required
| {"golden_diff": "diff --git a/warehouse/accounts/forms.py b/warehouse/accounts/forms.py\n--- a/warehouse/accounts/forms.py\n+++ b/warehouse/accounts/forms.py\n@@ -11,6 +11,7 @@\n # limitations under the License.\n import re\n \n+import disposable_email_domains\n import wtforms\n import wtforms.fields.html5\n \n@@ -84,6 +85,9 @@\n def validate_email(self, field):\n if self.user_service.find_userid_by_email(field.data) is not None:\n raise wtforms.validators.ValidationError(\"Email exists.\")\n+ domain = field.data.split('@')[-1]\n+ if domain in disposable_email_domains.blacklist:\n+ raise wtforms.validators.ValidationError(\"Disposable email.\")\n \n def validate_g_recaptcha_response(self, field):\n # do required data validation here due to enabled flag being required\n", "issue": "Refuse account creation when using disposable email addresses.\nlong term it makes no sense to accept accounts which use an email address which is disposable for managing legit packages. short/near term it opens an easy door for spammers to create accounts on PyPI.\n\ni've implemented blacklisting for account signup and email swaps which use the blacklist at https://github.com/martenson/disposable-email-domains for legacy pypi.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport re\n\nimport wtforms\nimport wtforms.fields.html5\n\nfrom warehouse import forms, recaptcha\n\n\nclass CredentialsMixin:\n username = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Length(max=50),\n ],\n )\n\n password = wtforms.PasswordField(\n validators=[\n wtforms.validators.DataRequired(),\n ],\n )\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\n# XXX: This is a naive password strength validator, but something that can\n# easily be replicated in JS for client-side feedback.\n# see: https://github.com/pypa/warehouse/issues/6\nPWD_MIN_LEN = 8\nPWD_RE = re.compile(r\"\"\"\n^ # start\n(?=.*[A-Z]+.*) # >= 1 upper case\n(?=.*[a-z]+.*) # >= 1 lower case\n(?=.*[0-9]+.*) # >= 1 number\n(?=.*[.*~`\\!@#$%^&\\*\\(\\)_+-={}|\\[\\]\\\\:\";'<>?,\\./]+.*) # >= 1 special char\n.{\"\"\" + str(PWD_MIN_LEN) + \"\"\",} # >= 8 chars\n$ # end\n\"\"\", re.X)\n\n\nclass RegistrationForm(CredentialsMixin, forms.Form):\n password_confirm = wtforms.PasswordField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.EqualTo(\n \"password\", \"Passwords must match.\"\n ),\n ],\n )\n\n full_name = wtforms.StringField()\n\n email = wtforms.fields.html5.EmailField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Email(),\n ],\n )\n\n g_recaptcha_response = wtforms.StringField()\n\n def __init__(self, *args, recaptcha_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.recaptcha_service = recaptcha_service\n\n def validate_username(self, field):\n if self.user_service.find_userid(field.data) is not None:\n raise 
wtforms.validators.ValidationError(\n \"Username exists.\")\n\n def validate_email(self, field):\n if self.user_service.find_userid_by_email(field.data) is not None:\n raise wtforms.validators.ValidationError(\"Email exists.\")\n\n def validate_g_recaptcha_response(self, field):\n # do required data validation here due to enabled flag being required\n if self.recaptcha_service.enabled and not field.data:\n raise wtforms.validators.ValidationError(\"Recaptcha error.\")\n try:\n self.recaptcha_service.verify_response(field.data)\n except recaptcha.RecaptchaError:\n # TODO: log error\n # don't want to provide the user with any detail\n raise wtforms.validators.ValidationError(\"Recaptcha error.\")\n\n def validate_password(self, field):\n if not PWD_RE.match(field.data):\n raise wtforms.validators.ValidationError(\n \"Password must contain an upper case letter, a lower case \"\n \"letter, a number, a special character and be at least \"\n \"%d characters in length\" % PWD_MIN_LEN\n )\n\n\nclass LoginForm(CredentialsMixin, forms.Form):\n def validate_username(self, field):\n userid = self.user_service.find_userid(field.data)\n\n if userid is None:\n raise wtforms.validators.ValidationError(\"Invalid user.\")\n\n def validate_password(self, field):\n userid = self.user_service.find_userid(self.username.data)\n if userid is not None:\n if not self.user_service.check_password(userid, field.data):\n raise wtforms.validators.ValidationError(\"Invalid password.\")\n", "path": "warehouse/accounts/forms.py"}]} | 1,773 | 177 |
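The core of the golden diff in the record above is a two-line domain check. A standalone, framework-free version of that check, built directly from the diff (it requires the `disposable-email-domains` package, which exposes a `blacklist` set; the error text mirrors the diff):

```python
# Minimal standalone sketch of the disposable-domain check added by the
# golden diff above, outside of wtforms.
import disposable_email_domains


def validate_email_domain(email: str) -> None:
    domain = email.split("@")[-1]  # same extraction as in the diff
    if domain in disposable_email_domains.blacklist:
        raise ValueError("Disposable email.")


validate_email_domain("user@example.com")            # passes silently
# validate_email_domain("user@mailinator.com")       # would raise ValueError
```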
gh_patches_debug_12917 | rasdani/github-patches | git_diff | elastic__apm-agent-python-724 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'AsyncTransport' has no attribute '_start_event_processor'
**Describe the bug**: ...
After upgrading to `elastic-apm==5.4.1` I now get an error when Celery starts:
```
<function _register_worker_signals.<locals>.worker_startup at 0x7feae4beb620> raised: AttributeError("'AsyncTransport' object has no attribute '_start_event_processor'",)
Traceback (most recent call last):
File "/venv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py", line 288, in send
response = receiver(signal=self, sender=sender, **named)
File "/venv/1.37.1/lib/python3.6/site-packages/elasticapm/contrib/celery/__init__.py", line 80, in worker_startup
client._transport._start_event_processor()
AttributeError: 'AsyncTransport' object has no attribute '_start_event_processor'
```
**Environment (please complete the following information)**
- OS: Linux-3.10.0-1062.9.1.el7.x86_64-x86_64-with-centos-7.7.1908-Core 2020-02-18 16:24:31
- Python version: Python 3.6.8
- Framework and version:
celery 4.4.0
Django 3.0.3
- APM Server version: ?
- Agent version: 5.4.1
I see the same error mentioned in issue #704, but I don't seem to have an issue with restarting Celery workers.
</issue>
<code>
[start of elasticapm/contrib/celery/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 from celery import signals
33
34 from elasticapm.utils import get_name_from_func
35
36
37 class CeleryFilter(object):
38 def filter(self, record):
39 if record.funcName in ("_log_error",):
40 return 0
41 else:
42 return 1
43
44
45 def register_exception_tracking(client):
46 dispatch_uid = "elasticapm-exc-tracking"
47
48 def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):
49 client.capture_exception(
50 extra={"task_id": task_id, "task": sender, "args": args, "kwargs": kwargs}, handled=False
51 )
52
53 signals.task_failure.disconnect(process_failure_signal, dispatch_uid=dispatch_uid)
54 signals.task_failure.connect(process_failure_signal, weak=False, dispatch_uid=dispatch_uid)
55 _register_worker_signals(client)
56
57
58 def register_instrumentation(client):
59 def begin_transaction(*args, **kwargs):
60 client.begin_transaction("celery")
61
62 def end_transaction(task_id, task, *args, **kwargs):
63 name = get_name_from_func(task)
64 client.end_transaction(name, kwargs.get("state", "None"))
65
66 dispatch_uid = "elasticapm-tracing-%s"
67
68 # unregister any existing clients
69 signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % "prerun")
70 signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % "postrun")
71
72 # register for this client
73 signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % "prerun", weak=False)
74 signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % "postrun")
75 _register_worker_signals(client)
76
77
78 def _register_worker_signals(client):
79 def worker_startup(*args, **kwargs):
80 client._transport._start_event_processor()
81
82 def worker_shutdown(*args, **kwargs):
83 client.close()
84
85 def connect_worker_process_init(*args, **kwargs):
86 signals.worker_process_init.connect(worker_startup, dispatch_uid="elasticapm-start-worker", weak=False)
87 signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid="elasticapm-shutdown-worker", weak=False)
88
89 signals.worker_init.connect(
90 connect_worker_process_init, dispatch_uid="elasticapm-connect-start-threads", weak=False
91 )
92
[end of elasticapm/contrib/celery/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/contrib/celery/__init__.py b/elasticapm/contrib/celery/__init__.py
--- a/elasticapm/contrib/celery/__init__.py
+++ b/elasticapm/contrib/celery/__init__.py
@@ -76,14 +76,10 @@
def _register_worker_signals(client):
- def worker_startup(*args, **kwargs):
- client._transport._start_event_processor()
-
def worker_shutdown(*args, **kwargs):
client.close()
def connect_worker_process_init(*args, **kwargs):
- signals.worker_process_init.connect(worker_startup, dispatch_uid="elasticapm-start-worker", weak=False)
signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid="elasticapm-shutdown-worker", weak=False)
signals.worker_init.connect(
| {"golden_diff": "diff --git a/elasticapm/contrib/celery/__init__.py b/elasticapm/contrib/celery/__init__.py\n--- a/elasticapm/contrib/celery/__init__.py\n+++ b/elasticapm/contrib/celery/__init__.py\n@@ -76,14 +76,10 @@\n \n \n def _register_worker_signals(client):\n-    def worker_startup(*args, **kwargs):\n-        client._transport._start_event_processor()\n-\n     def worker_shutdown(*args, **kwargs):\n         client.close()\n \n     def connect_worker_process_init(*args, **kwargs):\n-        signals.worker_process_init.connect(worker_startup, dispatch_uid=\"elasticapm-start-worker\", weak=False)\n         signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid=\"elasticapm-shutdown-worker\", weak=False)\n \n     signals.worker_init.connect(\n", "issue": "'AsyncTransport' has no attribute '_start_event_processor'\n**Describe the bug**: ...\n\nAfter upgrading to `elastic-apm==5.4.1` I now get an error when Celery starts:\n\n```\n<function _register_worker_signals.<locals>.worker_startup at 0x7feae4beb620> raised: AttributeError(\"'AsyncTransport' object has no attribute '_start_event_processor'\",)\nTraceback (most recent call last):\n  File \"/venv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py\", line 288, in send\n    response = receiver(signal=self, sender=sender, **named)\n  File \"/venv/1.37.1/lib/python3.6/site-packages/elasticapm/contrib/celery/__init__.py\", line 80, in worker_startup\n    client._transport._start_event_processor()\nAttributeError: 'AsyncTransport' object has no attribute '_start_event_processor' \n```\n\n**Environment (please complete the following information)**\n- OS: Linux-3.10.0-1062.9.1.el7.x86_64-x86_64-with-centos-7.7.1908-Core 2020-02-18 16:24:31\n- Python version: Python 3.6.8\n- Framework and version: \n  celery 4.4.0\n  Django 3.0.3\n- APM Server version: ?\n- Agent version: 5.4.1\n\nI see the same error mentioned in issue #704, but I don't seem to have an issue with restarting Celery workers.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n#   list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n#   this list of conditions and the following disclaimer in the documentation\n#   and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n#   contributors may be used to endorse or promote products derived from\n#   this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom celery import signals\n\nfrom elasticapm.utils import get_name_from_func\n\n\nclass CeleryFilter(object):\n    def filter(self, record):\n        if record.funcName in (\"_log_error\",):\n            return 0\n        else:\n            return 1\n\n\ndef register_exception_tracking(client):\n    dispatch_uid = \"elasticapm-exc-tracking\"\n\n    def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):\n        client.capture_exception(\n            extra={\"task_id\": task_id, \"task\": sender, \"args\": args, \"kwargs\": kwargs}, handled=False\n        )\n\n    signals.task_failure.disconnect(process_failure_signal, dispatch_uid=dispatch_uid)\n    signals.task_failure.connect(process_failure_signal, weak=False, dispatch_uid=dispatch_uid)\n    _register_worker_signals(client)\n\n\ndef register_instrumentation(client):\n    def begin_transaction(*args, **kwargs):\n        client.begin_transaction(\"celery\")\n\n    def end_transaction(task_id, task, *args, **kwargs):\n        name = get_name_from_func(task)\n        client.end_transaction(name, kwargs.get(\"state\", \"None\"))\n\n    dispatch_uid = \"elasticapm-tracing-%s\"\n\n    # unregister any existing clients\n    signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\")\n    signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % \"postrun\")\n\n    # register for this client\n    signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\", weak=False)\n    signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % \"postrun\")\n    _register_worker_signals(client)\n\n\ndef _register_worker_signals(client):\n    def worker_startup(*args, **kwargs):\n        client._transport._start_event_processor()\n\n    def worker_shutdown(*args, **kwargs):\n        client.close()\n\n    def connect_worker_process_init(*args, **kwargs):\n        signals.worker_process_init.connect(worker_startup, dispatch_uid=\"elasticapm-start-worker\", weak=False)\n        signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid=\"elasticapm-shutdown-worker\", weak=False)\n\n    signals.worker_init.connect(\n        connect_worker_process_init, dispatch_uid=\"elasticapm-connect-start-threads\", weak=False\n    )\n", "path": "elasticapm/contrib/celery/__init__.py"}]} | 1,938 | 187 |
gh_patches_debug_9210 | rasdani/github-patches | git_diff | chainer__chainer-3237 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DictDataset throws an internal error only in Python 3.
The following code throws an internal error only in a Python 3 environment with the latest version of Chainer (3.0.0b1, 8bcac6f).
```py
from chainer.datasets import DictDataset
def main():
a = range(10)
b = range(10, 20)
dataset = DictDataset(x=a, y=b)
print(dataset[0:5])
if __name__ == '__main__':
main()
```
In Python 3.6.1,
```sh
$ python --version
Python 3.6.1
$ python poc.py
Traceback (most recent call last):
File "poc.py", line 12, in <module>
main()
File "poc.py", line 8, in main
print(dataset[0:5])
File "/home/igarashi/projects/chainer/chainer/datasets/dict_dataset.py", line 34, in __getitem__
length = len(six.itervalues(batches).next())
AttributeError: 'dict_valueiterator' object has no attribute 'next'
```
In Python 2.7.13,
```sh
$ python --version
Python 2.7.13
$ python poc.py
[{'y': 10, 'x': 0}, {'y': 11, 'x': 1}, {'y': 12, 'x': 2}, {'y': 13, 'x': 3}, {'y': 14, 'x': 4}]
```
It is because an instance of `six.Iterator` doesn't have a `next()` method in a Python 3 environment.
[Reference](http://pythonhosted.org/six/#six.Iterator)
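A minimal sketch of the incompatibility and a portable spelling (the replacement call illustrates what works on both versions, not necessarily the fix the project will choose):
```py
import six

batches = {"x": [0, 1], "y": [10, 11]}
it = six.itervalues(batches)
# it.next()          # Python 2 only; raises AttributeError on Python 3
first = six.next(it)  # six.next() (or the builtin next()) works on both
print(len(first))     # 2
```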
</issue>
<code>
[start of chainer/datasets/dict_dataset.py]
1 import six
2
3
4 class DictDataset(object):
5
6 """Dataset of a dictionary of datasets.
7
8 It combines multiple datasets into one dataset. Each example is represented
9 by a dictionary mapping a key to an example of the corresponding dataset.
10
11 Args:
12 datasets: Underlying datasets. The keys are used as the keys of each
13 example. All datasets must have the same length.
14
15 """
16
17 def __init__(self, **datasets):
18 if not datasets:
19 raise ValueError('no datasets are given')
20 length = None
21 for key, dataset in six.iteritems(datasets):
22 if length is None:
23 length = len(dataset)
24 elif length != len(dataset):
25 raise ValueError(
26 'dataset length conflicts at "{}"'.format(key))
27 self._datasets = datasets
28 self._length = length
29
30 def __getitem__(self, index):
31 batches = {key: dataset[index]
32 for key, dataset in six.iteritems(self._datasets)}
33 if isinstance(index, slice):
34 length = len(six.itervalues(batches).next())
35 return [{key: batch[i] for key, batch in six.iteritems(batches)}
36 for i in six.moves.range(length)]
37 else:
38 return batches
39
40 def __len__(self):
41 return self._length
42
[end of chainer/datasets/dict_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/datasets/dict_dataset.py b/chainer/datasets/dict_dataset.py
--- a/chainer/datasets/dict_dataset.py
+++ b/chainer/datasets/dict_dataset.py
@@ -31,7 +31,7 @@
batches = {key: dataset[index]
for key, dataset in six.iteritems(self._datasets)}
if isinstance(index, slice):
- length = len(six.itervalues(batches).next())
+ length = len(six.next(six.itervalues(batches)))
return [{key: batch[i] for key, batch in six.iteritems(batches)}
for i in six.moves.range(length)]
else:
| {"golden_diff": "diff --git a/chainer/datasets/dict_dataset.py b/chainer/datasets/dict_dataset.py\n--- a/chainer/datasets/dict_dataset.py\n+++ b/chainer/datasets/dict_dataset.py\n@@ -31,7 +31,7 @@\n batches = {key: dataset[index]\n for key, dataset in six.iteritems(self._datasets)}\n if isinstance(index, slice):\n- length = len(six.itervalues(batches).next())\n+ length = len(six.next(six.itervalues(batches)))\n return [{key: batch[i] for key, batch in six.iteritems(batches)}\n for i in six.moves.range(length)]\n else:\n", "issue": "DictDataset throws an internal error only in Python 3.\nThe following code throws an internal error only in Python 3 environment with the latest version of Chainer. (3.0.0b1, 8bcac6f)\r\n```py\r\nfrom chainer.datasets import DictDataset\r\n\r\ndef main():\r\n a = range(10)\r\n b = range(10, 20)\r\n dataset = DictDataset(x=a, y=b)\r\n print(dataset[0:5])\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\nIn Python 3.6.1,\r\n```sh\r\n$ python --version\r\n Python 3.6.1\r\n$ python poc.py\r\n Traceback (most recent call last):\r\n File \"poc.py\", line 12, in <module>\r\n main()\r\n File \"poc.py\", line 8, in main\r\n print(dataset[0:5])\r\n File \"/home/igarashi/projects/chainer/chainer/datasets/dict_dataset.py\", line 34, in __getitem__\r\n length = len(six.itervalues(batches).next())\r\nAttributeError: 'dict_valueiterator' object has no attribute 'next'\r\n```\r\n\r\nIn Python 2.7.13, \r\n```sh\r\n$ python --version\r\n Python 2.7.13\r\n$ python poc.py\r\n [{'y': 10, 'x': 0}, {'y': 11, 'x': 1}, {'y': 12, 'x': 2}, {'y': 13, 'x': 3}, {'y': 14, 'x': 4}]\r\n```\r\n\r\nIt is because an instance of `six.Iterator` doesn't have `next()` method in the Python 3 environment.\r\n[Reference](http://pythonhosted.org/six/#six.Iterator)\r\n\n", "before_files": [{"content": "import six\n\n\nclass DictDataset(object):\n\n \"\"\"Dataset of a dictionary of datasets.\n\n It combines multiple datasets into one dataset. Each example is represented\n by a dictionary mapping a key to an example of the corresponding dataset.\n\n Args:\n datasets: Underlying datasets. The keys are used as the keys of each\n example. All datasets must have the same length.\n\n \"\"\"\n\n def __init__(self, **datasets):\n if not datasets:\n raise ValueError('no datasets are given')\n length = None\n for key, dataset in six.iteritems(datasets):\n if length is None:\n length = len(dataset)\n elif length != len(dataset):\n raise ValueError(\n 'dataset length conflicts at \"{}\"'.format(key))\n self._datasets = datasets\n self._length = length\n\n def __getitem__(self, index):\n batches = {key: dataset[index]\n for key, dataset in six.iteritems(self._datasets)}\n if isinstance(index, slice):\n length = len(six.itervalues(batches).next())\n return [{key: batch[i] for key, batch in six.iteritems(batches)}\n for i in six.moves.range(length)]\n else:\n return batches\n\n def __len__(self):\n return self._length\n", "path": "chainer/datasets/dict_dataset.py"}]} | 1,287 | 146 |
gh_patches_debug_35186 | rasdani/github-patches | git_diff | vnpy__vnpy-1795 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: the `to_rq_symbol` method of the `RqdataClient` class in rqdata.py mishandles continuous and index contracts
## Environment
* Operating system: e.g. Windows 10
* Anaconda version: Anaconda 18.12, Python 3.7, 64-bit
* vn.py version: v2.0.3
## Issue type
Pick one of three: Bug
## Expected behavior
The contract symbol is converted correctly to the matching RQData symbol.
## Actual behavior
CZCE continuous and index contracts are converted incorrectly: for example, AP888 becomes AP2888 and AP99 becomes AP199, so no data can be downloaded.
## Steps to reproduce
Download AP88 data in the backtesting module.
For Bug-type issues, please provide concrete reproduction steps and error screenshots.
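A minimal reproduction of the digit-split logic (extracted by hand from `to_rq_symbol` below) shows why the first digit of an index or continuous symbol gets treated as a year digit:

```python
def split(symbol):
    for count, word in enumerate(symbol):
        if word.isdigit():
            break
    return symbol[:count], symbol[count], symbol[count + 1:]

print(split("AP888"))  # ('AP', '8', '88') -> rewritten as AP2888
print(split("AP99"))   # ('AP', '9', '9')  -> rewritten as AP199
```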
</issue>
<code>
[start of vnpy/trader/rqdata.py]
1 from datetime import datetime, timedelta
2 from typing import List
3
4 from rqdatac import init as rqdata_init
5 from rqdatac.services.basic import all_instruments as rqdata_all_instruments
6 from rqdatac.services.get_price import get_price as rqdata_get_price
7
8 from .setting import SETTINGS
9 from .constant import Exchange, Interval
10 from .object import BarData, HistoryRequest
11
12
13 INTERVAL_VT2RQ = {
14 Interval.MINUTE: "1m",
15 Interval.HOUR: "60m",
16 Interval.DAILY: "1d",
17 }
18
19 INTERVAL_ADJUSTMENT_MAP = {
20 Interval.MINUTE: timedelta(minutes=1),
21 Interval.HOUR: timedelta(hours=1),
22 Interval.DAILY: timedelta() # no need to adjust for daily bar
23 }
24
25
26 class RqdataClient:
27 """
28 Client for querying history data from RQData.
29 """
30
31 def __init__(self):
32 """"""
33 self.username = SETTINGS["rqdata.username"]
34 self.password = SETTINGS["rqdata.password"]
35
36 self.inited = False
37 self.symbols = set()
38
39 def init(self):
40 """"""
41 if self.inited:
42 return True
43
44 if not self.username or not self.password:
45 return False
46
47 rqdata_init(self.username, self.password,
48 ('rqdatad-pro.ricequant.com', 16011))
49
50 try:
51 df = rqdata_all_instruments(date=datetime.now())
52 for ix, row in df.iterrows():
53 self.symbols.add(row['order_book_id'])
54 except RuntimeError:
55 return False
56
57 self.inited = True
58 return True
59
60 def to_rq_symbol(self, symbol: str, exchange: Exchange):
61 """
62 CZCE product of RQData has symbol like "TA1905" while
63 vt symbol is "TA905.CZCE" so need to add "1" in symbol.
64 """
65 if exchange in [Exchange.SSE, Exchange.SZSE]:
66 if exchange == Exchange.SSE:
67 rq_symbol = f"{symbol}.XSHG"
68 else:
69 rq_symbol = f"{symbol}.XSHE"
70 else:
71 if exchange is not Exchange.CZCE:
72 return symbol.upper()
73
74 for count, word in enumerate(symbol):
75 if word.isdigit():
76 break
77
78 # noinspection PyUnboundLocalVariable
79 product = symbol[:count]
80 year = symbol[count]
81 month = symbol[count + 1:]
82
83 if year == "9":
84 year = "1" + year
85 else:
86 year = "2" + year
87
88 rq_symbol = f"{product}{year}{month}".upper()
89
90 return rq_symbol
91
92 def query_history(self, req: HistoryRequest):
93 """
94 Query history bar data from RQData.
95 """
96 symbol = req.symbol
97 exchange = req.exchange
98 interval = req.interval
99 start = req.start
100 end = req.end
101
102 rq_symbol = self.to_rq_symbol(symbol, exchange)
103 if rq_symbol not in self.symbols:
104 return None
105
106 rq_interval = INTERVAL_VT2RQ.get(interval)
107 if not rq_interval:
108 return None
109
110 # For adjust timestamp from bar close point (RQData) to open point (VN Trader)
111 adjustment = INTERVAL_ADJUSTMENT_MAP[interval]
112
113 # For querying night trading period data
114 end += timedelta(1)
115
116 df = rqdata_get_price(
117 rq_symbol,
118 frequency=rq_interval,
119 fields=["open", "high", "low", "close", "volume"],
120 start_date=start,
121 end_date=end
122 )
123
124 data: List[BarData] = []
125 for ix, row in df.iterrows():
126 bar = BarData(
127 symbol=symbol,
128 exchange=exchange,
129 interval=interval,
130 datetime=row.name.to_pydatetime() - adjustment,
131 open_price=row["open"],
132 high_price=row["high"],
133 low_price=row["low"],
134 close_price=row["close"],
135 volume=row["volume"],
136 gateway_name="RQ"
137 )
138 data.append(bar)
139
140 return data
141
142
143 rqdata_client = RqdataClient()
144
[end of vnpy/trader/rqdata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vnpy/trader/rqdata.py b/vnpy/trader/rqdata.py
--- a/vnpy/trader/rqdata.py
+++ b/vnpy/trader/rqdata.py
@@ -36,11 +36,15 @@
self.inited = False
self.symbols = set()
- def init(self):
+ def init(self, username="", password=""):
""""""
if self.inited:
return True
+ if username and password:
+ self.username = username
+ self.password = password
+
if not self.username or not self.password:
return False
@@ -75,6 +79,11 @@
if word.isdigit():
break
+ # Check for index symbol
+ time_str = symbol[count:]
+ if time_str in ["88", "888", "99"]:
+ return symbol
+
# noinspection PyUnboundLocalVariable
product = symbol[:count]
year = symbol[count]
@@ -118,24 +127,27 @@
frequency=rq_interval,
fields=["open", "high", "low", "close", "volume"],
start_date=start,
- end_date=end
+ end_date=end,
+ adjust_type="none"
)
data: List[BarData] = []
- for ix, row in df.iterrows():
- bar = BarData(
- symbol=symbol,
- exchange=exchange,
- interval=interval,
- datetime=row.name.to_pydatetime() - adjustment,
- open_price=row["open"],
- high_price=row["high"],
- low_price=row["low"],
- close_price=row["close"],
- volume=row["volume"],
- gateway_name="RQ"
- )
- data.append(bar)
+
+ if df is not None:
+ for ix, row in df.iterrows():
+ bar = BarData(
+ symbol=symbol,
+ exchange=exchange,
+ interval=interval,
+ datetime=row.name.to_pydatetime() - adjustment,
+ open_price=row["open"],
+ high_price=row["high"],
+ low_price=row["low"],
+ close_price=row["close"],
+ volume=row["volume"],
+ gateway_name="RQ"
+ )
+ data.append(bar)
return data
| {"golden_diff": "diff --git a/vnpy/trader/rqdata.py b/vnpy/trader/rqdata.py\n--- a/vnpy/trader/rqdata.py\n+++ b/vnpy/trader/rqdata.py\n@@ -36,11 +36,15 @@\n         self.inited = False\n         self.symbols = set()\n \n-    def init(self):\n+    def init(self, username=\"\", password=\"\"):\n         \"\"\"\"\"\"\n         if self.inited:\n             return True\n \n+        if username and password:\n+            self.username = username\n+            self.password = password\n+\n         if not self.username or not self.password:\n             return False\n \n@@ -75,6 +79,11 @@\n             if word.isdigit():\n                 break\n \n+        # Check for index symbol\n+        time_str = symbol[count:]\n+        if time_str in [\"88\", \"888\", \"99\"]:\n+            return symbol\n+\n         # noinspection PyUnboundLocalVariable\n         product = symbol[:count]\n         year = symbol[count]\n@@ -118,24 +127,27 @@\n             frequency=rq_interval,\n             fields=[\"open\", \"high\", \"low\", \"close\", \"volume\"],\n             start_date=start,\n-            end_date=end\n+            end_date=end,\n+            adjust_type=\"none\"\n         )\n \n         data: List[BarData] = []\n-        for ix, row in df.iterrows():\n-            bar = BarData(\n-                symbol=symbol,\n-                exchange=exchange,\n-                interval=interval,\n-                datetime=row.name.to_pydatetime() - adjustment,\n-                open_price=row[\"open\"],\n-                high_price=row[\"high\"],\n-                low_price=row[\"low\"],\n-                close_price=row[\"close\"],\n-                volume=row[\"volume\"],\n-                gateway_name=\"RQ\"\n-            )\n-            data.append(bar)\n+\n+        if df is not None:\n+            for ix, row in df.iterrows():\n+                bar = BarData(\n+                    symbol=symbol,\n+                    exchange=exchange,\n+                    interval=interval,\n+                    datetime=row.name.to_pydatetime() - adjustment,\n+                    open_price=row[\"open\"],\n+                    high_price=row[\"high\"],\n+                    low_price=row[\"low\"],\n+                    close_price=row[\"close\"],\n+                    volume=row[\"volume\"],\n+                    gateway_name=\"RQ\"\n+                )\n+                data.append(bar)\n \n         return data\n", "issue": "Bug\uff1arqdata.py \u4e2d RqdataClient \u7c7b\u7684 to_rq_symbol \u65b9\u6cd5\u5bf9\u8fde\u7eed\u548c\u6307\u6570\u5408\u7ea6\u8f6c\u6362\u6709\u95ee\u9898\n## \u73af\u5883\r\n\r\n* \u64cd\u4f5c\u7cfb\u7edf: \u5982Windows 10\r\n* Anaconda\u7248\u672c: Anaconda 18.12 Python 3.7 64\u4f4d\r\n* vn.py\u7248\u672c: v2.0.3\r\n\r\n## Issue\u7c7b\u578b\n\u4e09\u9009\u4e00\uff1aBug\r\n\r\n## \u9884\u671f\u7a0b\u5e8f\u884c\u4e3a\n\u6b63\u786e\u5c06\u5408\u7ea6\u540d\u8f6c\u6362\u81f3rqdata\u4e2d\u7684\u5408\u7ea6\u540d\r\n\r\n## \u5b9e\u9645\u7a0b\u5e8f\u884c\u4e3a\n\u9519\u8bef\u7684\u5c06\u90d1\u5546\u6240\u7684\u5408\u7ea6\u8fde\u7eed\u548c\u6307\u6570\u5408\u7ea6\u8f6c\u6362\uff0c\u4f8b\u5982\u5c06AP888\u4f1a\u8f6c\u6362\u4e3aAP2888\uff0cAP99\u4f1a\u8f6c\u6362\u81f3AP199\u5bfc\u81f4\u65e0\u6cd5\u4e0b\u8f7d\u5230\u6570\u636e\u3002\r\n\r\n## \u91cd\u73b0\u6b65\u9aa4\n\u5728\u56de\u6d4b\u6a21\u5757\u4e2d\u4e0b\u8f7dAP88\u6570\u636e\u5373\u53ef\u3002\r\n\r\n\u9488\u5bf9Bug\u7c7b\u578bIssue\uff0c\u8bf7\u63d0\u4f9b\u5177\u4f53\u91cd\u73b0\u6b65\u9aa4\u4ee5\u53ca\u62a5\u9519\u622a\u56fe\r\n\r\n\r\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom typing import List\n\nfrom rqdatac import init as rqdata_init\nfrom rqdatac.services.basic import all_instruments as rqdata_all_instruments\nfrom rqdatac.services.get_price import get_price as rqdata_get_price\n\nfrom .setting import SETTINGS\nfrom .constant import Exchange, Interval\nfrom .object import BarData, HistoryRequest\n\n\nINTERVAL_VT2RQ = {\n    Interval.MINUTE: \"1m\",\n    Interval.HOUR: \"60m\",\n    Interval.DAILY: \"1d\",\n}\n\nINTERVAL_ADJUSTMENT_MAP = {\n    Interval.MINUTE: timedelta(minutes=1),\n    Interval.HOUR: timedelta(hours=1),\n    Interval.DAILY: timedelta()         # no need to adjust for daily bar\n}\n\n\nclass RqdataClient:\n    \"\"\"\n    Client for querying history data from RQData.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\"\"\"\n        self.username = SETTINGS[\"rqdata.username\"]\n        self.password = SETTINGS[\"rqdata.password\"]\n\n        self.inited = False\n        self.symbols = set()\n\n    def init(self):\n        \"\"\"\"\"\"\n        if self.inited:\n            return True\n\n        if not self.username or not self.password:\n            return False\n\n        rqdata_init(self.username, self.password,\n                    ('rqdatad-pro.ricequant.com', 16011))\n\n        try:\n            df = rqdata_all_instruments(date=datetime.now())\n            for ix, row in df.iterrows():\n                self.symbols.add(row['order_book_id'])\n        except RuntimeError:\n            return False\n\n        self.inited = True\n        return True\n\n    def to_rq_symbol(self, symbol: str, exchange: Exchange):\n        \"\"\"\n        CZCE product of RQData has symbol like \"TA1905\" while\n        vt symbol is \"TA905.CZCE\" so need to add \"1\" in symbol.\n        \"\"\"\n        if exchange in [Exchange.SSE, Exchange.SZSE]:\n            if exchange == Exchange.SSE:\n                rq_symbol = f\"{symbol}.XSHG\"\n            else:\n                rq_symbol = f\"{symbol}.XSHE\"\n        else:\n            if exchange is not Exchange.CZCE:\n                return symbol.upper()\n\n            for count, word in enumerate(symbol):\n                if word.isdigit():\n                    break\n\n            # noinspection PyUnboundLocalVariable\n            product = symbol[:count]\n            year = symbol[count]\n            month = symbol[count + 1:]\n\n            if year == \"9\":\n                year = \"1\" + year\n            else:\n                year = \"2\" + year\n\n            rq_symbol = f\"{product}{year}{month}\".upper()\n\n        return rq_symbol\n\n    def query_history(self, req: HistoryRequest):\n        \"\"\"\n        Query history bar data from RQData.\n        \"\"\"\n        symbol = req.symbol\n        exchange = req.exchange\n        interval = req.interval\n        start = req.start\n        end = req.end\n\n        rq_symbol = self.to_rq_symbol(symbol, exchange)\n        if rq_symbol not in self.symbols:\n            return None\n\n        rq_interval = INTERVAL_VT2RQ.get(interval)\n        if not rq_interval:\n            return None\n\n        # For adjust timestamp from bar close point (RQData) to open point (VN Trader)\n        adjustment = INTERVAL_ADJUSTMENT_MAP[interval]\n\n        # For querying night trading period data\n        end += timedelta(1)\n\n        df = rqdata_get_price(\n            rq_symbol,\n            frequency=rq_interval,\n            fields=[\"open\", \"high\", \"low\", \"close\", \"volume\"],\n            start_date=start,\n            end_date=end\n        )\n\n        data: List[BarData] = []\n        for ix, row in df.iterrows():\n            bar = BarData(\n                symbol=symbol,\n                exchange=exchange,\n                interval=interval,\n                datetime=row.name.to_pydatetime() - adjustment,\n                open_price=row[\"open\"],\n                high_price=row[\"high\"],\n                low_price=row[\"low\"],\n                close_price=row[\"close\"],\n                volume=row[\"volume\"],\n                gateway_name=\"RQ\"\n            )\n            data.append(bar)\n\n        return data\n\n\nrqdata_client = RqdataClient()\n", "path": "vnpy/trader/rqdata.py"}]} | 1,966 | 528 |
gh_patches_debug_8600 | rasdani/github-patches | git_diff | marshmallow-code__webargs-356 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: webargs 5.0 introduces incompatibility with Flask < 1.0
https://github.com/marshmallow-code/webargs/blob/5.0.0/webargs/flaskparser.py#L63
`_get_data_for_json` is only available in Flask >= 1.0.
For Flask < 1.0, it fails with the following error:
```
File "/usr/local/lib/python2.7/site-packages/webargs/flaskparser.py", line 63, in parse_json
data = req._get_data_for_json(cache=True)
File "/usr/local/lib/python2.7/site-packages/werkzeug/local.py", line 347, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'Request' object has no attribute '_get_data_for_json'
```
I had to downgrade webargs to 4.4.1 to get it work.
So you need to update the framework requirement at https://github.com/marshmallow-code/webargs/blob/dev/setup.py#L11 or update the code for backward compatibility.
IMHO, using `_get_data_for_json` should be avoided because it's considered private and can be changed/removed anytime.
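One version-tolerant alternative is to read the raw body through the public werkzeug API, which exists on Flask < 1.0 and >= 1.0 alike. A sketch, not necessarily the maintainers' chosen fix:

```python
def read_json_bytes(req):
    # req.get_data(cache=True) is public and stable, unlike the private
    # req._get_data_for_json() helper that only appeared in Flask 1.0.
    return req.get_data(cache=True)
```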
</issue>
<code>
[start of webargs/flaskparser.py]
1 # -*- coding: utf-8 -*-
2 """Flask request argument parsing module.
3
4 Example: ::
5
6 from flask import Flask
7
8 from webargs import fields
9 from webargs.flaskparser import use_args
10
11 app = Flask(__name__)
12
13 hello_args = {
14 'name': fields.Str(required=True)
15 }
16
17 @app.route('/')
18 @use_args(hello_args)
19 def index(args):
20 return 'Hello ' + args['name']
21 """
22 import flask
23 from werkzeug.exceptions import HTTPException
24
25 from webargs import core
26 from webargs.core import json
27
28
29 def abort(http_status_code, exc=None, **kwargs):
30 """Raise a HTTPException for the given http_status_code. Attach any keyword
31 arguments to the exception for later processing.
32
33 From Flask-Restful. See NOTICE file for license information.
34 """
35 try:
36 flask.abort(http_status_code)
37 except HTTPException as err:
38 err.data = kwargs
39 err.exc = exc
40 raise err
41
42
43 def is_json_request(req):
44 return core.is_json(req.mimetype)
45
46
47 class FlaskParser(core.Parser):
48 """Flask request argument parser."""
49
50 __location_map__ = dict(view_args="parse_view_args", **core.Parser.__location_map__)
51
52 def parse_view_args(self, req, name, field):
53 """Pull a value from the request's ``view_args``."""
54 return core.get_value(req.view_args, name, field)
55
56 def parse_json(self, req, name, field):
57 """Pull a json value from the request."""
58 json_data = self._cache.get("json")
59 if json_data is None:
60 # We decode the json manually here instead of
61 # using req.get_json() so that we can handle
62 # JSONDecodeErrors consistently
63 data = req._get_data_for_json(cache=True)
64 try:
65 self._cache["json"] = json_data = core.parse_json(data)
66 except json.JSONDecodeError as e:
67 if e.doc == "":
68 return core.missing
69 else:
70 return self.handle_invalid_json_error(e, req)
71 return core.get_value(json_data, name, field, allow_many_nested=True)
72
73 def parse_querystring(self, req, name, field):
74 """Pull a querystring value from the request."""
75 return core.get_value(req.args, name, field)
76
77 def parse_form(self, req, name, field):
78 """Pull a form value from the request."""
79 try:
80 return core.get_value(req.form, name, field)
81 except AttributeError:
82 pass
83 return core.missing
84
85 def parse_headers(self, req, name, field):
86 """Pull a value from the header data."""
87 return core.get_value(req.headers, name, field)
88
89 def parse_cookies(self, req, name, field):
90 """Pull a value from the cookiejar."""
91 return core.get_value(req.cookies, name, field)
92
93 def parse_files(self, req, name, field):
94 """Pull a file from the request."""
95 return core.get_value(req.files, name, field)
96
97 def handle_error(self, error, req, schema, error_status_code, error_headers):
98 """Handles errors during parsing. Aborts the current HTTP request and
99 responds with a 422 error.
100 """
101 status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
102 abort(
103 status_code,
104 exc=error,
105 messages=error.messages,
106 schema=schema,
107 headers=error_headers,
108 )
109
110 def handle_invalid_json_error(self, error, req, *args, **kwargs):
111 abort(400, exc=error, messages={"json": ["Invalid JSON body."]})
112
113 def get_default_request(self):
114 """Override to use Flask's thread-local request objec by default"""
115 return flask.request
116
117
118 parser = FlaskParser()
119 use_args = parser.use_args
120 use_kwargs = parser.use_kwargs
121
[end of webargs/flaskparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/webargs/flaskparser.py b/webargs/flaskparser.py
--- a/webargs/flaskparser.py
+++ b/webargs/flaskparser.py
@@ -60,7 +60,7 @@
# We decode the json manually here instead of
# using req.get_json() so that we can handle
# JSONDecodeErrors consistently
- data = req._get_data_for_json(cache=True)
+ data = req.get_data(cache=True)
try:
self._cache["json"] = json_data = core.parse_json(data)
except json.JSONDecodeError as e:
| {"golden_diff": "diff --git a/webargs/flaskparser.py b/webargs/flaskparser.py\n--- a/webargs/flaskparser.py\n+++ b/webargs/flaskparser.py\n@@ -60,7 +60,7 @@\n             # We decode the json manually here instead of\n             # using req.get_json() so that we can handle\n             # JSONDecodeErrors consistently\n-            data = req._get_data_for_json(cache=True)\n+            data = req.get_data(cache=True)\n             try:\n                 self._cache[\"json\"] = json_data = core.parse_json(data)\n             except json.JSONDecodeError as e:\n", "issue": "bug: webargs 5.0 introduces incompatibility with Flask < 1.0\nhttps://github.com/marshmallow-code/webargs/blob/5.0.0/webargs/flaskparser.py#L63\r\n\r\n`_get_data_for_json` is only available since Flask >= 1.0\r\n\r\nfor Flask < 1.0, there is an error as follows:\r\n\r\n```\r\n  File \"/usr/local/lib/python2.7/site-packages/webargs/flaskparser.py\", line 63, in parse_json\r\n    data = req._get_data_for_json(cache=True)\r\n  File \"/usr/local/lib/python2.7/site-packages/werkzeug/local.py\", line 347, in __getattr__\r\n    return getattr(self._get_current_object(), name)\r\nAttributeError: 'Request' object has no attribute '_get_data_for_json'\r\n```\r\n\r\nI had to downgrade webargs to 4.4.1 to get it work.\r\n\r\nSo you need to update this framework requirement https://github.com/marshmallow-code/webargs/blob/dev/setup.py#L11 or update the code for the backward compatibility.\r\n\r\nIMHO, using `_get_data_for_json` should be avoided because it's considered private and can be changed/removed anytime.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Flask request argument parsing module.\n\nExample: ::\n\n    from flask import Flask\n\n    from webargs import fields\n    from webargs.flaskparser import use_args\n\n    app = Flask(__name__)\n\n    hello_args = {\n        'name': fields.Str(required=True)\n    }\n\n    @app.route('/')\n    @use_args(hello_args)\n    def index(args):\n        return 'Hello ' + args['name']\n\"\"\"\nimport flask\nfrom werkzeug.exceptions import HTTPException\n\nfrom webargs import core\nfrom webargs.core import json\n\n\ndef abort(http_status_code, exc=None, **kwargs):\n    \"\"\"Raise a HTTPException for the given http_status_code. Attach any keyword\n    arguments to the exception for later processing.\n\n    From Flask-Restful. See NOTICE file for license information.\n    \"\"\"\n    try:\n        flask.abort(http_status_code)\n    except HTTPException as err:\n        err.data = kwargs\n        err.exc = exc\n        raise err\n\n\ndef is_json_request(req):\n    return core.is_json(req.mimetype)\n\n\nclass FlaskParser(core.Parser):\n    \"\"\"Flask request argument parser.\"\"\"\n\n    __location_map__ = dict(view_args=\"parse_view_args\", **core.Parser.__location_map__)\n\n    def parse_view_args(self, req, name, field):\n        \"\"\"Pull a value from the request's ``view_args``.\"\"\"\n        return core.get_value(req.view_args, name, field)\n\n    def parse_json(self, req, name, field):\n        \"\"\"Pull a json value from the request.\"\"\"\n        json_data = self._cache.get(\"json\")\n        if json_data is None:\n            # We decode the json manually here instead of\n            # using req.get_json() so that we can handle\n            # JSONDecodeErrors consistently\n            data = req._get_data_for_json(cache=True)\n            try:\n                self._cache[\"json\"] = json_data = core.parse_json(data)\n            except json.JSONDecodeError as e:\n                if e.doc == \"\":\n                    return core.missing\n                else:\n                    return self.handle_invalid_json_error(e, req)\n        return core.get_value(json_data, name, field, allow_many_nested=True)\n\n    def parse_querystring(self, req, name, field):\n        \"\"\"Pull a querystring value from the request.\"\"\"\n        return core.get_value(req.args, name, field)\n\n    def parse_form(self, req, name, field):\n        \"\"\"Pull a form value from the request.\"\"\"\n        try:\n            return core.get_value(req.form, name, field)\n        except AttributeError:\n            pass\n        return core.missing\n\n    def parse_headers(self, req, name, field):\n        \"\"\"Pull a value from the header data.\"\"\"\n        return core.get_value(req.headers, name, field)\n\n    def parse_cookies(self, req, name, field):\n        \"\"\"Pull a value from the cookiejar.\"\"\"\n        return core.get_value(req.cookies, name, field)\n\n    def parse_files(self, req, name, field):\n        \"\"\"Pull a file from the request.\"\"\"\n        return core.get_value(req.files, name, field)\n\n    def handle_error(self, error, req, schema, error_status_code, error_headers):\n        \"\"\"Handles errors during parsing. Aborts the current HTTP request and\n        responds with a 422 error.\n        \"\"\"\n        status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS\n        abort(\n            status_code,\n            exc=error,\n            messages=error.messages,\n            schema=schema,\n            headers=error_headers,\n        )\n\n    def handle_invalid_json_error(self, error, req, *args, **kwargs):\n        abort(400, exc=error, messages={\"json\": [\"Invalid JSON body.\"]})\n\n    def get_default_request(self):\n        \"\"\"Override to use Flask's thread-local request objec by default\"\"\"\n        return flask.request\n\n\nparser = FlaskParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "webargs/flaskparser.py"}]} | 1,894 | 131 |
gh_patches_debug_12423 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-284 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API tests using wrong tables database
**Describe the bug**
The Django API tests are running on `test_mathesar_db_test_database`, which differs from the `mathesar_db_test_database` tables database we should be using. As a result, we don't have a proper reference to the database being used by the API functions, which prevents certain operations, such as installing types for a test.
**Expected behavior**
We should ensure `pytest-django` doesn't build a separate tables database.
**Additional context**
Currently blocking #276
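Django only reuses an existing database for tests when `DATABASES[alias]["TEST"]["NAME"]` points at it, so one possible shape of the fix is a settings override like the sketch below (the `TEST` flag and key lookup are illustrative assumptions, not the final implementation):

```python
# config/settings.py (sketch)
if decouple_config('TEST', default=False, cast=bool):  # assumed test flag
    key = decouple_config('MATHESAR_DATABASE_KEY')
    DATABASES[key]['TEST'] = {'NAME': DATABASES[key]['NAME']}
```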
</issue>
<code>
[start of config/settings.py]
1 """
2 Django settings for config project.
3
4 Generated by 'django-admin startproject' using Django 3.1.7.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.1/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.1/ref/settings/
11 """
12
13 import os
14 from pathlib import Path
15
16 from decouple import Csv, config as decouple_config
17 from dj_database_url import parse as db_url
18
19 # Build paths inside the project like this: BASE_DIR / 'subdir'.
20 BASE_DIR = Path(__file__).resolve().parent.parent
21
22 # Application definition
23
24 INSTALLED_APPS = [
25 "django.contrib.admin",
26 "django.contrib.auth",
27 "django.contrib.contenttypes",
28 "django.contrib.sessions",
29 "django.contrib.messages",
30 "django.contrib.staticfiles",
31 "rest_framework",
32 "django_filters",
33 "django_property_filter",
34 "mathesar",
35 ]
36
37 MIDDLEWARE = [
38 "django.middleware.security.SecurityMiddleware",
39 "django.contrib.sessions.middleware.SessionMiddleware",
40 "django.middleware.common.CommonMiddleware",
41 "django.middleware.csrf.CsrfViewMiddleware",
42 "django.contrib.auth.middleware.AuthenticationMiddleware",
43 "django.contrib.messages.middleware.MessageMiddleware",
44 "django.middleware.clickjacking.XFrameOptionsMiddleware",
45 ]
46
47 ROOT_URLCONF = "config.urls"
48
49 TEMPLATES = [
50 {
51 "BACKEND": "django.template.backends.django.DjangoTemplates",
52 "DIRS": [],
53 "APP_DIRS": True,
54 "OPTIONS": {
55 "context_processors": [
56 "config.context_processors.get_settings",
57 "django.template.context_processors.debug",
58 "django.template.context_processors.request",
59 "django.contrib.auth.context_processors.auth",
60 "django.contrib.messages.context_processors.messages",
61 ],
62 },
63 },
64 ]
65
66 WSGI_APPLICATION = "config.wsgi.application"
67
68 # Database
69 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases
70
71 # TODO: Add to documentation that database keys should not be than 128 characters.
72 DATABASES = {
73 decouple_config('DJANGO_DATABASE_KEY'): decouple_config('DJANGO_DATABASE_URL', cast=db_url),
74 decouple_config('MATHESAR_DATABASE_KEY'): decouple_config('MATHESAR_DATABASE_URL', cast=db_url)
75 }
76
77
78 # Quick-start development settings - unsuitable for production
79 # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
80
81 # SECURITY WARNING: keep the secret key used in production secret!
82 SECRET_KEY = decouple_config('SECRET_KEY')
83
84 # SECURITY WARNING: don't run with debug turned on in production!
85 DEBUG = decouple_config('DEBUG', default=False, cast=bool)
86
87 ALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())
88
89 # Password validation
90 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
91
92 AUTH_PASSWORD_VALIDATORS = [
93 {
94 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
95 },
96 {
97 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
98 },
99 {
100 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
101 },
102 {
103 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
104 },
105 ]
106
107
108 # Internationalization
109 # https://docs.djangoproject.com/en/3.1/topics/i18n/
110
111 LANGUAGE_CODE = "en-us"
112
113 TIME_ZONE = "UTC"
114
115 USE_I18N = True
116
117 USE_L10N = True
118
119 USE_TZ = True
120
121
122 # Static files (CSS, JavaScript, Images)
123 # https://docs.djangoproject.com/en/3.1/howto/static-files/
124
125 STATIC_URL = "/static/"
126
127 CLIENT_DEV_URL = "http://localhost:3000"
128
129
130 # Media files (uploaded by the user)
131
132 MEDIA_ROOT = os.path.join(BASE_DIR, '.media/')
133
134 MEDIA_URL = "/media/"
135
[end of config/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/config/settings.py b/config/settings.py
--- a/config/settings.py
+++ b/config/settings.py
@@ -74,6 +74,13 @@
decouple_config('MATHESAR_DATABASE_KEY'): decouple_config('MATHESAR_DATABASE_URL', cast=db_url)
}
+# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'
+# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']
+if decouple_config('TEST', default=False, cast=bool):
+ DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['TEST'] = {
+ 'NAME': DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['NAME']
+ }
+
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
| {"golden_diff": "diff --git a/config/settings.py b/config/settings.py\n--- a/config/settings.py\n+++ b/config/settings.py\n@@ -74,6 +74,13 @@\n     decouple_config('MATHESAR_DATABASE_KEY'): decouple_config('MATHESAR_DATABASE_URL', cast=db_url)\n }\n \n+# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n+# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\n+if decouple_config('TEST', default=False, cast=bool):\n+    DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['TEST'] = {\n+        'NAME': DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['NAME']\n+    }\n+\n \n # Quick-start development settings - unsuitable for production\n # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n", "issue": "API tests using wrong tables database\n**Describe the bug**\r\nThe django API tests are running on the `test_mathesar_db_test_database`, which differs from the `mathesar_db_test_database` tables database we should be using. As a result, we don't have a proper reference to the database being used by the API functions, which prevents us from certain operations like installing types for a test. \r\n\r\n**Expected behavior**\r\nWe should ensure `pytest-django` doesn't build a separate tables database.\r\n\r\n**Additional context**\r\nCurrently blocking #276 \r\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for config project.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n    \"django.contrib.admin\",\n    \"django.contrib.auth\",\n    \"django.contrib.contenttypes\",\n    \"django.contrib.sessions\",\n    \"django.contrib.messages\",\n    \"django.contrib.staticfiles\",\n    \"rest_framework\",\n    \"django_filters\",\n    \"django_property_filter\",\n    \"mathesar\",\n]\n\nMIDDLEWARE = [\n    \"django.middleware.security.SecurityMiddleware\",\n    \"django.contrib.sessions.middleware.SessionMiddleware\",\n    \"django.middleware.common.CommonMiddleware\",\n    \"django.middleware.csrf.CsrfViewMiddleware\",\n    \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n    \"django.contrib.messages.middleware.MessageMiddleware\",\n    \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n    {\n        \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n        \"DIRS\": [],\n        \"APP_DIRS\": True,\n        \"OPTIONS\": {\n            \"context_processors\": [\n                \"config.context_processors.get_settings\",\n                \"django.template.context_processors.debug\",\n                \"django.template.context_processors.request\",\n                \"django.contrib.auth.context_processors.auth\",\n                \"django.contrib.messages.context_processors.messages\",\n            ],\n        },\n    },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\nDATABASES = {\n    decouple_config('DJANGO_DATABASE_KEY'): decouple_config('DJANGO_DATABASE_URL', cast=db_url),\n    decouple_config('MATHESAR_DATABASE_KEY'): decouple_config('MATHESAR_DATABASE_URL', cast=db_url)\n}\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n    {\n        \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n    },\n    {\n        \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n    },\n    {\n        \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n    },\n    {\n        \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n    },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nCLIENT_DEV_URL = \"http://localhost:3000\"\n\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n", "path": "config/settings.py"}]} | 1,791 | 202 |
gh_patches_debug_14759 | rasdani/github-patches | git_diff | saulpw__visidata-1901 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keystroke ] not detected on Windows
In PowerShell and cmd.exe I found that sorting only worked in one direction. The `[` shortcut was detected and had its effect, but `]` was not. I narrowed it down to a problem with `windows-curses`, and in turn with its dependency `PDCurses`: https://github.com/zephyrproject-rtos/windows-curses/issues/41
Here's my plan for addressing it. I hope I'll get around to it sometime next week.
- [ ] Improve the mapping in `PDCurses` and submit a pull request
- [ ] Bump the git submodule in `windows-curses` to the `PDCurses` version that has the fix and ask/wait for a release of this package
- [ ] Address the issue in this repository, perhaps by pinning `windows-curses` to a version of at least the newly released package.
I'm making this issue here just to document it and track progress. If you're reading this because you have this issue, I would recommend using WSL instead. (WSL is not an option for me unfortunately).
I didn't include the `.vd`-file to reproduce this issue. The simplest way to reproduce it is to get a Windows computer, run `visidata` from Powershell or cmd.exe and sort any column by pressing `]`.
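Until the fixed packages are released, the dependency could be held back with an environment marker in `setup.py`; the exact version bound below is illustrative:

```python
from setuptools import setup

setup(
    name="example",  # stand-in package, not visidata's real metadata
    install_requires=[
        # hypothetical pin: avoid windows-curses builds whose PDCurses
        # keymap drops the ']' keystroke
        'windows-curses<2.3.1; platform_system == "Windows"',
    ],
)
```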
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 from setuptools import setup
4 # tox can't actually run python3 setup.py: https://github.com/tox-dev/tox/issues/96
5 #from visidata import __version__
6 __version__ = '2.12dev'
7
8 setup(name='visidata',
9 version=__version__,
10 description='terminal interface for exploring and arranging tabular data',
11 long_description=open('README.md').read(),
12 long_description_content_type='text/markdown',
13 author='Saul Pwanson',
14 python_requires='>=3.7',
15 author_email='[email protected]',
16 url='https://visidata.org',
17 download_url='https://github.com/saulpw/visidata/tarball/' + __version__,
18 scripts=['bin/vd'],
19 entry_points={'console_scripts': [
20 'visidata=visidata.main:vd_cli'
21 ],
22 },
23 py_modules=['visidata'],
24 install_requires=[
25 'python-dateutil',
26 'windows-curses; platform_system == "Windows"',
27 'importlib-metadata >= 3.6',
28 ],
29 packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],
30 data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications/', ['visidata/desktop/visidata.desktop'])],
31 package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},
32 license='GPLv3',
33 classifiers=[
34 'Development Status :: 5 - Production/Stable',
35 'Environment :: Console',
36 'Environment :: Console :: Curses',
37 'Intended Audience :: Developers',
38 'Intended Audience :: Science/Research',
39 'Intended Audience :: System Administrators',
40 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
41 'Operating System :: OS Independent',
42 'Programming Language :: Python :: 3',
43 'Topic :: Database :: Front-Ends',
44 'Topic :: Scientific/Engineering',
45 'Topic :: Office/Business :: Financial :: Spreadsheet',
46 'Topic :: Scientific/Engineering :: Visualization',
47 'Topic :: Utilities',
48 ],
49 keywords=('console tabular data spreadsheet terminal viewer textpunk'
50 'curses csv hdf5 h5 xlsx excel tsv'),
51 )
52
53
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
py_modules=['visidata'],
install_requires=[
'python-dateutil',
- 'windows-curses; platform_system == "Windows"',
+ 'windows-curses<2.3.1; platform_system == "Windows"', #1841
'importlib-metadata >= 3.6',
],
packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n     py_modules=['visidata'],\n     install_requires=[\n         'python-dateutil',\n-        'windows-curses; platform_system == \"Windows\"',\n+        'windows-curses<2.3.1; platform_system == \"Windows\"', #1841\n         'importlib-metadata >= 3.6',\n     ],\n     packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],\n", "issue": "Keystroke ] not detected on Windows\nIn Powershell and cmd.exe I encountered that sorting didn't work in both orders. The `[` shortcut was detected and had its effect, but the `]` didn't. I narrowed it down to a problem with `windows-curses`, and in turn with its dependency `PDCurses`: https://github.com/zephyrproject-rtos/windows-curses/issues/41\r\n\r\nHere's my plan on how to address it. I hope I'll get around to it somewhere next week.\r\n- [ ] Improve the mapping in `PDCurses` and submit a pull request\r\n- [ ] Bump the git submodule in `windows-curses` to the `PDCurses` version that has the fix and ask/wait for a release of this package\r\n- [ ] Address the issue in this repository, perhaps by pinning `windows-curses` to a version of at least the newly released package.\r\n\r\nI'm making this issue here just to document it and track progress. If you're reading this because you have this issue, I would recommend using WSL instead. (WSL is not an option for me unfortunately).\r\n\r\nI didn't include the `.vd`-file to reproduce this issue. The simplest way to reproduce it is to get a Windows computer, run `visidata` from Powershell or cmd.exe and sort any column by pressing `]`.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom setuptools import setup\n# tox can't actually run python3 setup.py: https://github.com/tox-dev/tox/issues/96\n#from visidata import __version__\n__version__ = '2.12dev'\n\nsetup(name='visidata',\n      version=__version__,\n      description='terminal interface for exploring and arranging tabular data',\n      long_description=open('README.md').read(),\n      long_description_content_type='text/markdown',\n      author='Saul Pwanson',\n      python_requires='>=3.7',\n      author_email='[email protected]',\n      url='https://visidata.org',\n      download_url='https://github.com/saulpw/visidata/tarball/' + __version__,\n      scripts=['bin/vd'],\n      entry_points={'console_scripts': [\n          'visidata=visidata.main:vd_cli'\n        ],\n      },\n      py_modules=['visidata'],\n      install_requires=[\n          'python-dateutil',\n          'windows-curses; platform_system == \"Windows\"',\n          'importlib-metadata >= 3.6',\n      ],\n      packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],\n      data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications/', ['visidata/desktop/visidata.desktop'])],\n      package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},\n      license='GPLv3',\n      classifiers=[\n          'Development Status :: 5 - Production/Stable',\n          'Environment :: Console',\n          'Environment :: Console :: Curses',\n          'Intended Audience :: Developers',\n          'Intended Audience :: Science/Research',\n          'Intended Audience :: System Administrators',\n          'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n          'Operating System :: OS Independent',\n          'Programming Language :: Python :: 3',\n          'Topic :: Database :: Front-Ends',\n          'Topic :: Scientific/Engineering',\n          'Topic :: Office/Business :: Financial :: Spreadsheet',\n          'Topic :: Scientific/Engineering :: Visualization',\n          'Topic :: Utilities',\n      ],\n      keywords=('console tabular data spreadsheet terminal viewer textpunk'\n                'curses csv hdf5 h5 xlsx excel tsv'),\n      )\n", "path": "setup.py"}]} | 1,496 | 176 |
gh_patches_debug_18243 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-1465 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSS bundles generation breaks background images relative urls
This is a bug related to PR #1300.
</issue>
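To make the failure concrete: when each bundle's CSS is concatenated into one file served from a different directory, every relative `url(...)` reference (background images, fonts) resolves against the combined file's location instead of the original bundle's. A minimal sketch of the rewrite idea, reusing the regex from the fix at the end of this record — the demo strings are hypothetical:

```python
import re

def fix_relative_urls(css):
    # Prefix '../' to any url(...) that is neither absolute ('/')
    # nor a scheme reference such as http: or data:, so the path
    # still resolves once the CSS moves into the combined bundle.
    return re.sub(r"""(url\(['"]?(?!['"]?([a-z]+:|\/)))""", r'\1../', css)

print(fix_relative_urls("body { background: url(images/bg.png); }"))
# -> body { background: url(../images/bg.png); }
print(fix_relative_urls("a { background: url(/abs.png), url(data:image/png;base64,AA==); }"))
# -> unchanged: absolute and data: URLs are left alone
```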
<code>
[start of Products/CMFPlone/resources/browser/combine.py]
1 from zExceptions import NotFound
2 from Acquisition import aq_base
3 from datetime import datetime
4 from plone.registry.interfaces import IRegistry
5 from plone.resource.file import FilesystemFile
6 from plone.resource.interfaces import IResourceDirectory
7 from Products.CMFPlone.interfaces import IBundleRegistry
8 from Products.CMFPlone.interfaces.resources import (
9 OVERRIDE_RESOURCE_DIRECTORY_NAME,
10 )
11 from StringIO import StringIO
12 from zope.component import getUtility
13 from zope.component import queryUtility
14
15 PRODUCTION_RESOURCE_DIRECTORY = "production"
16
17
18 def get_production_resource_directory():
19 persistent_directory = queryUtility(IResourceDirectory, name="persistent")
20 if persistent_directory is None:
21 return ''
22 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
23 try:
24 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
25 except NotFound:
26 return "%s/++unique++1" % PRODUCTION_RESOURCE_DIRECTORY
27 timestamp = production_folder.readFile('timestamp.txt')
28 return "%s/++unique++%s" % (
29 PRODUCTION_RESOURCE_DIRECTORY, timestamp)
30
31
32 def get_resource(context, path):
33 resource = context.unrestrictedTraverse(path)
34 if isinstance(resource, FilesystemFile):
35 (directory, sep, filename) = path.rpartition('/')
36 return context.unrestrictedTraverse(directory).readFile(filename)
37 else:
38 if hasattr(aq_base(resource), 'GET'):
39 # for FileResource
40 return resource.GET()
41 else:
42 # any BrowserView
43 return resource()
44
45
46 def write_js(context, folder, meta_bundle):
47 registry = getUtility(IRegistry)
48 resources = []
49
50 # default resources
51 if meta_bundle == 'default' and registry.records.get(
52 'plone.resources/jquery.js'
53 ):
54 resources.append(get_resource(context,
55 registry.records['plone.resources/jquery.js'].value))
56 resources.append(get_resource(context,
57 registry.records['plone.resources.requirejs'].value))
58 resources.append(get_resource(context,
59 registry.records['plone.resources.configjs'].value))
60
61 # bundles
62 bundles = registry.collectionOfInterface(
63 IBundleRegistry, prefix="plone.bundles", check=False)
64 for bundle in bundles.values():
65 if bundle.merge_with == meta_bundle:
66 resources.append(get_resource(context, bundle.jscompilation))
67
68 fi = StringIO()
69 for script in resources:
70 fi.write(script + '\n')
71 folder.writeFile(meta_bundle + ".js", fi)
72
73
74 def write_css(context, folder, meta_bundle):
75 registry = getUtility(IRegistry)
76 resources = []
77
78 bundles = registry.collectionOfInterface(
79 IBundleRegistry, prefix="plone.bundles", check=False)
80 for bundle in bundles.values():
81 if bundle.merge_with == meta_bundle:
82 resources.append(get_resource(context, bundle.csscompilation))
83
84 fi = StringIO()
85 for script in resources:
86 fi.write(script + '\n')
87 folder.writeFile(meta_bundle + ".css", fi)
88
89
90 def combine_bundles(context):
91 persistent_directory = queryUtility(IResourceDirectory, name="persistent")
92 if persistent_directory is None:
93 return
94 if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:
95 persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)
96 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
97 if PRODUCTION_RESOURCE_DIRECTORY not in container:
98 container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)
99 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
100
101 # store timestamp
102 fi = StringIO()
103 fi.write(datetime.now().isoformat())
104 production_folder.writeFile("timestamp.txt", fi)
105
106 # generate new combined bundles
107 write_js(context, production_folder, 'default')
108 write_js(context, production_folder, 'logged-in')
109 write_css(context, production_folder, 'default')
110 write_css(context, production_folder, 'logged-in')
111
[end of Products/CMFPlone/resources/browser/combine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py
--- a/Products/CMFPlone/resources/browser/combine.py
+++ b/Products/CMFPlone/resources/browser/combine.py
@@ -1,3 +1,4 @@
+import re
from zExceptions import NotFound
from Acquisition import aq_base
from datetime import datetime
@@ -79,7 +80,15 @@
IBundleRegistry, prefix="plone.bundles", check=False)
for bundle in bundles.values():
if bundle.merge_with == meta_bundle:
- resources.append(get_resource(context, bundle.csscompilation))
+ css = get_resource(context, bundle.csscompilation)
+ # Preserve relative urls:
+ # we prefix with '../'' any url not starting with '/'
+ # or http: or data:
+ css = re.sub(
+ r"""(url\(['"]?(?!['"]?([a-z]+:|\/)))""",
+ r'\1../',
+ css)
+ resources.append(css)
fi = StringIO()
for script in resources:
| {"golden_diff": "diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py\n--- a/Products/CMFPlone/resources/browser/combine.py\n+++ b/Products/CMFPlone/resources/browser/combine.py\n@@ -1,3 +1,4 @@\n+import re\n from zExceptions import NotFound\n from Acquisition import aq_base\n from datetime import datetime\n@@ -79,7 +80,15 @@\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n- resources.append(get_resource(context, bundle.csscompilation))\n+ css = get_resource(context, bundle.csscompilation)\n+ # Preserve relative urls:\n+ # we prefix with '../'' any url not starting with '/'\n+ # or http: or data:\n+ css = re.sub(\n+ r\"\"\"(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))\"\"\",\n+ r'\\1../',\n+ css)\n+ resources.append(css)\n \n fi = StringIO()\n for script in resources:\n", "issue": "CSS bundles generation breaks background images relative urls\nThis is a bug related to PR #1300.\n\n", "before_files": [{"content": "from zExceptions import NotFound\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import (\n OVERRIDE_RESOURCE_DIRECTORY_NAME,\n)\nfrom StringIO import StringIO\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nPRODUCTION_RESOURCE_DIRECTORY = \"production\"\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n else:\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n return resource.GET()\n else:\n # any BrowserView\n return resource()\n\n\ndef write_js(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n # default resources\n if meta_bundle == 'default' and registry.records.get(\n 'plone.resources/jquery.js'\n ):\n resources.append(get_resource(context,\n registry.records['plone.resources/jquery.js'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.requirejs'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.configjs'].value))\n\n # bundles\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n resources.append(get_resource(context, bundle.jscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".js\", fi)\n\n\ndef write_css(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n bundles = 
registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n resources.append(get_resource(context, bundle.csscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".css\", fi)\n\n\ndef combine_bundles(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile(\"timestamp.txt\", fi)\n\n # generate new combined bundles\n write_js(context, production_folder, 'default')\n write_js(context, production_folder, 'logged-in')\n write_css(context, production_folder, 'default')\n write_css(context, production_folder, 'logged-in')\n", "path": "Products/CMFPlone/resources/browser/combine.py"}]} | 1,586 | 252 |
gh_patches_debug_2033 | rasdani/github-patches | git_diff | googleapis__python-bigquery-802 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ChunkedEncodingError is not retried when fetching data with list_rows()
Original issue: https://github.com/googleapis/python-bigquery-storage/issues/242
A user reported that they saw an error in production when fetching table data with `Client.list_rows()`. That method uses the [default retry object](https://github.com/googleapis/python-bigquery/blob/7e0e2bafc4c3f98a4246100f504fd78a01a28e7d/google/cloud/bigquery/retry.py#L49), which currently does not consider `requests.exceptions.ChunkedEncodingError` retryable.
(It does retry `requests.exceptions.ConnectionError`, but `ChunkedEncodingError` is not a subclass of that.)
</issue>
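Until the default predicate covers this case, a caller can wrap the predicate themselves. A minimal sketch, assuming the private `_should_retry` helper shown in the module below stays importable — it is not public API, so treat this as a stopgap rather than a supported pattern:

```python
import requests.exceptions
from google.api_core import retry
from google.cloud.bigquery.retry import _should_retry  # private helper -- assumption

def _should_retry_with_chunked(exc):
    # Treat a mid-stream ChunkedEncodingError as transient, then
    # defer to the library's own retry predicate for everything else.
    if isinstance(exc, requests.exceptions.ChunkedEncodingError):
        return True
    return _should_retry(exc)

PATCHED_RETRY = retry.Retry(predicate=_should_retry_with_chunked)
# rows = client.list_rows(table, retry=PATCHED_RETRY)  # hypothetical call site
```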
<code>
[start of google/cloud/bigquery/retry.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from google.api_core import exceptions
16 from google.api_core import retry
17 from google.auth import exceptions as auth_exceptions
18 import requests.exceptions
19
20
21 _RETRYABLE_REASONS = frozenset(
22 ["rateLimitExceeded", "backendError", "internalError", "badGateway"]
23 )
24
25 _UNSTRUCTURED_RETRYABLE_TYPES = (
26 ConnectionError,
27 exceptions.TooManyRequests,
28 exceptions.InternalServerError,
29 exceptions.BadGateway,
30 requests.exceptions.ConnectionError,
31 auth_exceptions.TransportError,
32 )
33
34
35 def _should_retry(exc):
36 """Predicate for determining when to retry.
37
38 We retry if and only if the 'reason' is 'backendError'
39 or 'rateLimitExceeded'.
40 """
41 if not hasattr(exc, "errors") or len(exc.errors) == 0:
42 # Check for unstructured error returns, e.g. from GFE
43 return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
44
45 reason = exc.errors[0]["reason"]
46 return reason in _RETRYABLE_REASONS
47
48
49 DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
50 """The default retry object.
51
52 Any method with a ``retry`` parameter will be retried automatically,
53 with reasonable defaults. To disable retry, pass ``retry=None``.
54 To modify the default retry behavior, call a ``with_XXX`` method
55 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,
56 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.
57 """
58
[end of google/cloud/bigquery/retry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py
--- a/google/cloud/bigquery/retry.py
+++ b/google/cloud/bigquery/retry.py
@@ -27,6 +27,7 @@
exceptions.TooManyRequests,
exceptions.InternalServerError,
exceptions.BadGateway,
+ requests.exceptions.ChunkedEncodingError,
requests.exceptions.ConnectionError,
auth_exceptions.TransportError,
)
| {"golden_diff": "diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py\n--- a/google/cloud/bigquery/retry.py\n+++ b/google/cloud/bigquery/retry.py\n@@ -27,6 +27,7 @@\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n+ requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n )\n", "issue": "ChunkedEncodingError is not retried when fetching data with list_rows()\nOriginal issue: https://github.com/googleapis/python-bigquery-storage/issues/242\r\n\r\nA user reported that they saw an error in production when fetching table data with `Client.list_rows()`. That method uses the [default retry object](https://github.com/googleapis/python-bigquery/blob/7e0e2bafc4c3f98a4246100f504fd78a01a28e7d/google/cloud/bigquery/retry.py#L49), which currently does not consider `requests.exceptions.ChunkedEncodingError` retryable.\r\n\r\n(it does retry `requests.exceptions.ConnectionError`, but `ChunkedEncodingError` is not a subclass of that.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}]} | 1,262 | 93 |
gh_patches_debug_5179 | rasdani/github-patches | git_diff | lutris__lutris-2653 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Impossible to log in to GOG.com: invalid cookie?
**Describe the bug**
I can't connect Lutris with my GOG.com account: the GOG.com credentials window appears and I can fill it in, but when I confirm the form, the window closes and… nothing more happens. My account isn't connected, and if I close the "Import games" window, I can't reopen it unless I delete the '.cache/lutris' directory.
**Expected behavior**
I should have a second GOG.com form asking for a PIN code, then my account should be connected in Lutris.
**Current behavior**
As in the description above, plus some technical details:
* two files are created in '~/.cache/lutris/': .gog.auth and .gog.token (attached as [gog.auth.txt](https://github.com/lutris/lutris/files/4309081/gog.auth.txt) and [gog.token.txt](https://github.com/lutris/lutris/files/4309083/gog.token.txt))
* according to the standard output in the terminal, there are some Python errors, ending with what looks like an invalid cookie (see [lutris.txt](https://github.com/lutris/lutris/files/4309117/lutris.txt))
**Steps to reproduce**
It happens while importing GOG games from the app or installing GOG games from the website.
**System information**
Fresh Lutris install, Arch Linux, kernel 5.5.8. More in [lutris.log](https://github.com/lutris/lutris/files/4309125/lutris.log)
</issue>
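The diff at the end of this record points at the root cause: Webkit2 writes an extra tab-separated column per cookie line, so the seven-name tuple unpacking in `_really_load` raises `ValueError`, which the broad `except Exception` clause converts into the "invalid Netscape format cookies file" `OSError`. A hedged sketch of the tolerant split — the sample line and the meaning of the extra column are assumptions:

```python
line = "gog.com\tTRUE\t/\tTRUE\t1600000000\tsessionid\tabc123\textra"
fields = line.split("\t")
# Keep the seven standard Netscape columns and ignore the rest,
# instead of letting an eighth column break tuple unpacking.
domain, domain_specified, path, secure, expires, name, value = fields[:7]
additional_info = fields[7:]  # whatever Webkit2 appends -- unverified assumption
```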
<code>
[start of lutris/util/cookies.py]
1 import time
2 from http.cookiejar import MozillaCookieJar, Cookie, _warn_unhandled_exception
3
4
5 class WebkitCookieJar(MozillaCookieJar):
6 """Subclass of MozillaCookieJar for compatibility with cookies
7 coming from Webkit2.
8 This disables the magic_re header which is not present and adds
9 compatibility with HttpOnly cookies (See http://bugs.python.org/issue2190)
10 """
11
12 def _really_load(self, f, filename, ignore_discard, ignore_expires):
13 now = time.time()
14 try:
15 while 1:
16 line = f.readline()
17 if line == "":
18 break
19
20 # last field may be absent, so keep any trailing tab
21 if line.endswith("\n"):
22 line = line[:-1]
23
24 sline = line.strip()
25 # support HttpOnly cookies (as stored by curl or old Firefox).
26 if sline.startswith("#HttpOnly_"):
27 line = sline[10:]
28 elif sline.startswith("#") or sline == "":
29 continue
30
31 domain, domain_specified, path, secure, expires, name, value = line.split(
32 "\t"
33 )
34 secure = secure == "TRUE"
35 domain_specified = domain_specified == "TRUE"
36 if name == "":
37 # cookies.txt regards 'Set-Cookie: foo' as a cookie
38 # with no name, whereas http.cookiejar regards it as a
39 # cookie with no value.
40 name = value
41 value = None
42
43 initial_dot = domain.startswith(".")
44 assert domain_specified == initial_dot
45
46 discard = False
47 if expires == "":
48 expires = None
49 discard = True
50
51 # assume path_specified is false
52 c = Cookie(
53 0,
54 name,
55 value,
56 None,
57 False,
58 domain,
59 domain_specified,
60 initial_dot,
61 path,
62 False,
63 secure,
64 expires,
65 discard,
66 None,
67 None,
68 {},
69 )
70 if not ignore_discard and c.discard:
71 continue
72 if not ignore_expires and c.is_expired(now):
73 continue
74 self.set_cookie(c)
75
76 except OSError:
77 raise
78 except Exception:
79 _warn_unhandled_exception()
80 raise OSError(
81 "invalid Netscape format cookies file %r: %r" % (filename, line)
82 )
83
[end of lutris/util/cookies.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/util/cookies.py b/lutris/util/cookies.py
--- a/lutris/util/cookies.py
+++ b/lutris/util/cookies.py
@@ -28,7 +28,7 @@
elif sline.startswith("#") or sline == "":
continue
- domain, domain_specified, path, secure, expires, name, value = line.split(
+ domain, domain_specified, path, secure, expires, name, value, aditional_info = line.split(
"\t"
)
secure = secure == "TRUE"
| {"golden_diff": "diff --git a/lutris/util/cookies.py b/lutris/util/cookies.py\n--- a/lutris/util/cookies.py\n+++ b/lutris/util/cookies.py\n@@ -28,7 +28,7 @@\n elif sline.startswith(\"#\") or sline == \"\":\n continue\n \n- domain, domain_specified, path, secure, expires, name, value = line.split(\n+ domain, domain_specified, path, secure, expires, name, value, aditional_info = line.split(\n \"\\t\"\n )\n secure = secure == \"TRUE\"\n", "issue": "Impossible login to GOG.com: invalid cookie ?\n**Describe the bug**\r\n\r\nI can't connect Lutris with my GOG.com account: the GOG.com credentials window is here, I can fill it, but when I confirm this form, the window closes and\u2026 nothing more. My account isn't connected, and if I close the \"Import games\" window, I can't reopen it unless I delete '.cache/lutris' directory. \r\n\r\n**Expected behavior**\r\n\r\nI should have a second GOG.com form asking for a PIN code, then my account should be connected in Lutris.\r\n\r\n**Current behavior**\r\n\r\nAs in description above. Plus technical details:\r\n\r\n* two files are created in '~/.cache/lutris/': .gog.auth and .gog.token (attached as [gog.auth.txt](https://github.com/lutris/lutris/files/4309081/gog.auth.txt) and [gog.token.txt](https://github.com/lutris/lutris/files/4309083/gog.token.txt))\r\n* according to standard output in terminal, some problems with Python, ending with an invalid cookie? (see [lutris.txt](https://github.com/lutris/lutris/files/4309117/lutris.txt))\r\n\r\n**Steps to reproduce**\r\n\r\nIt happens while importing GOG games from the app or installing GOG games from the website.\r\n\r\n**System information**\r\n\r\nFresh Lutris install, Arch Linux, kernel 5.5.8. More in [lutris.log](https://github.com/lutris/lutris/files/4309125/lutris.log)\n", "before_files": [{"content": "import time\nfrom http.cookiejar import MozillaCookieJar, Cookie, _warn_unhandled_exception\n\n\nclass WebkitCookieJar(MozillaCookieJar):\n \"\"\"Subclass of MozillaCookieJar for compatibility with cookies\n coming from Webkit2.\n This disables the magic_re header which is not present and adds\n compatibility with HttpOnly cookies (See http://bugs.python.org/issue2190)\n \"\"\"\n\n def _really_load(self, f, filename, ignore_discard, ignore_expires):\n now = time.time()\n try:\n while 1:\n line = f.readline()\n if line == \"\":\n break\n\n # last field may be absent, so keep any trailing tab\n if line.endswith(\"\\n\"):\n line = line[:-1]\n\n sline = line.strip()\n # support HttpOnly cookies (as stored by curl or old Firefox).\n if sline.startswith(\"#HttpOnly_\"):\n line = sline[10:]\n elif sline.startswith(\"#\") or sline == \"\":\n continue\n\n domain, domain_specified, path, secure, expires, name, value = line.split(\n \"\\t\"\n )\n secure = secure == \"TRUE\"\n domain_specified = domain_specified == \"TRUE\"\n if name == \"\":\n # cookies.txt regards 'Set-Cookie: foo' as a cookie\n # with no name, whereas http.cookiejar regards it as a\n # cookie with no value.\n name = value\n value = None\n\n initial_dot = domain.startswith(\".\")\n assert domain_specified == initial_dot\n\n discard = False\n if expires == \"\":\n expires = None\n discard = True\n\n # assume path_specified is false\n c = Cookie(\n 0,\n name,\n value,\n None,\n False,\n domain,\n domain_specified,\n initial_dot,\n path,\n False,\n secure,\n expires,\n discard,\n None,\n None,\n {},\n )\n if not ignore_discard and c.discard:\n continue\n if not ignore_expires and c.is_expired(now):\n continue\n self.set_cookie(c)\n\n except 
OSError:\n raise\n except Exception:\n _warn_unhandled_exception()\n raise OSError(\n \"invalid Netscape format cookies file %r: %r\" % (filename, line)\n )\n", "path": "lutris/util/cookies.py"}]} | 1,558 | 128 |
gh_patches_debug_17580 | rasdani/github-patches | git_diff | pyca__cryptography-3873 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot use BLAKE2b or BLAKE2s with HMAC
Python 3.6.2 on Ubuntu 17.10
Cryptography and dependencies installed via pip in virtualenv
cffi==1.10.0
cryptography==2.0.3
pip==9.0.1
setuptools==36.2.7
Steps to reproduce:
```
In [1]: from cryptography.hazmat.backends import default_backend
...: from cryptography.hazmat.primitives import hashes
...: from cryptography.hazmat.primitives.hmac import HMAC
...:
...: backend = default_backend()
...:
In [2]: hmac = HMAC(b'\x00'*32, hashes.SHA256(), backend) # just fine
In [3]: hmac = HMAC(b'\x00'*32, hashes.BLAKE2s(digest_size=32), backend)
---------------------------------------------------------------------------
UnsupportedAlgorithm Traceback (most recent call last)
<ipython-input-3-61f273a52c45> in <module>()
----> 1 hmac = HMAC(b'\x00'*32, hashes.BLAKE2s(digest_size=32), backend)
~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/primitives/hmac.py in __init__(self, key, algorithm, backend, ctx)
30 self._key = key
31 if ctx is None:
---> 32 self._ctx = self._backend.create_hmac_ctx(key, self.algorithm)
33 else:
34 self._ctx = ctx
~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/backend.py in create_hmac_ctx(self, key, algorithm)
176
177 def create_hmac_ctx(self, key, algorithm):
--> 178 return _HMACContext(self, key, algorithm)
179
180 def _build_openssl_digest_name(self, algorithm):
~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/hmac.py in __init__(self, backend, key, algorithm, ctx)
32 "{0} is not a supported hash on this backend.".format(
33 algorithm.name),
---> 34 _Reasons.UNSUPPORTED_HASH
35 )
36 res = self._backend._lib.HMAC_Init_ex(
UnsupportedAlgorithm: blake2s is not a supported hash on this backend.
```
</issue>
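The root cause, reflected in the diff at the end of this record: `EVP_get_digestbyname` is called with the algorithm's plain name, but OpenSSL registers the BLAKE2 digests under length-qualified names — `blake2b512` and `blake2s256` — so the lookup returns NULL. A rough sketch of the name mapping the backend helper performs (illustrative only; the real `_build_openssl_digest_name` may differ in detail):

```python
def build_openssl_digest_name(algorithm_name):
    # OpenSSL only knows the BLAKE2 digests by names that include
    # the output length in bits; map the generic names onto them.
    mapping = {"blake2b": "blake2b512", "blake2s": "blake2s256"}
    return mapping.get(algorithm_name, algorithm_name).encode("ascii")

assert build_openssl_digest_name("sha256") == b"sha256"
assert build_openssl_digest_name("blake2s") == b"blake2s256"
```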
<code>
[start of src/cryptography/hazmat/backends/openssl/hmac.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7
8 from cryptography import utils
9 from cryptography.exceptions import (
10 InvalidSignature, UnsupportedAlgorithm, _Reasons
11 )
12 from cryptography.hazmat.primitives import constant_time, hashes, mac
13
14
15 @utils.register_interface(mac.MACContext)
16 @utils.register_interface(hashes.HashContext)
17 class _HMACContext(object):
18 def __init__(self, backend, key, algorithm, ctx=None):
19 self._algorithm = algorithm
20 self._backend = backend
21
22 if ctx is None:
23 ctx = self._backend._lib.Cryptography_HMAC_CTX_new()
24 self._backend.openssl_assert(ctx != self._backend._ffi.NULL)
25 ctx = self._backend._ffi.gc(
26 ctx, self._backend._lib.Cryptography_HMAC_CTX_free
27 )
28 evp_md = self._backend._lib.EVP_get_digestbyname(
29 algorithm.name.encode('ascii'))
30 if evp_md == self._backend._ffi.NULL:
31 raise UnsupportedAlgorithm(
32 "{0} is not a supported hash on this backend.".format(
33 algorithm.name),
34 _Reasons.UNSUPPORTED_HASH
35 )
36 res = self._backend._lib.HMAC_Init_ex(
37 ctx, key, len(key), evp_md, self._backend._ffi.NULL
38 )
39 self._backend.openssl_assert(res != 0)
40
41 self._ctx = ctx
42 self._key = key
43
44 algorithm = utils.read_only_property("_algorithm")
45
46 def copy(self):
47 copied_ctx = self._backend._lib.Cryptography_HMAC_CTX_new()
48 self._backend.openssl_assert(copied_ctx != self._backend._ffi.NULL)
49 copied_ctx = self._backend._ffi.gc(
50 copied_ctx, self._backend._lib.Cryptography_HMAC_CTX_free
51 )
52 res = self._backend._lib.HMAC_CTX_copy(copied_ctx, self._ctx)
53 self._backend.openssl_assert(res != 0)
54 return _HMACContext(
55 self._backend, self._key, self.algorithm, ctx=copied_ctx
56 )
57
58 def update(self, data):
59 res = self._backend._lib.HMAC_Update(self._ctx, data, len(data))
60 self._backend.openssl_assert(res != 0)
61
62 def finalize(self):
63 buf = self._backend._ffi.new("unsigned char[]",
64 self._backend._lib.EVP_MAX_MD_SIZE)
65 outlen = self._backend._ffi.new("unsigned int *")
66 res = self._backend._lib.HMAC_Final(self._ctx, buf, outlen)
67 self._backend.openssl_assert(res != 0)
68 self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size)
69 return self._backend._ffi.buffer(buf)[:outlen[0]]
70
71 def verify(self, signature):
72 digest = self.finalize()
73 if not constant_time.bytes_eq(digest, signature):
74 raise InvalidSignature("Signature did not match digest.")
75
[end of src/cryptography/hazmat/backends/openssl/hmac.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/backends/openssl/hmac.py b/src/cryptography/hazmat/backends/openssl/hmac.py
--- a/src/cryptography/hazmat/backends/openssl/hmac.py
+++ b/src/cryptography/hazmat/backends/openssl/hmac.py
@@ -25,12 +25,11 @@
ctx = self._backend._ffi.gc(
ctx, self._backend._lib.Cryptography_HMAC_CTX_free
)
- evp_md = self._backend._lib.EVP_get_digestbyname(
- algorithm.name.encode('ascii'))
+ name = self._backend._build_openssl_digest_name(algorithm)
+ evp_md = self._backend._lib.EVP_get_digestbyname(name)
if evp_md == self._backend._ffi.NULL:
raise UnsupportedAlgorithm(
- "{0} is not a supported hash on this backend.".format(
- algorithm.name),
+ "{0} is not a supported hash on this backend".format(name),
_Reasons.UNSUPPORTED_HASH
)
res = self._backend._lib.HMAC_Init_ex(
| {"golden_diff": "diff --git a/src/cryptography/hazmat/backends/openssl/hmac.py b/src/cryptography/hazmat/backends/openssl/hmac.py\n--- a/src/cryptography/hazmat/backends/openssl/hmac.py\n+++ b/src/cryptography/hazmat/backends/openssl/hmac.py\n@@ -25,12 +25,11 @@\n ctx = self._backend._ffi.gc(\n ctx, self._backend._lib.Cryptography_HMAC_CTX_free\n )\n- evp_md = self._backend._lib.EVP_get_digestbyname(\n- algorithm.name.encode('ascii'))\n+ name = self._backend._build_openssl_digest_name(algorithm)\n+ evp_md = self._backend._lib.EVP_get_digestbyname(name)\n if evp_md == self._backend._ffi.NULL:\n raise UnsupportedAlgorithm(\n- \"{0} is not a supported hash on this backend.\".format(\n- algorithm.name),\n+ \"{0} is not a supported hash on this backend\".format(name),\n _Reasons.UNSUPPORTED_HASH\n )\n res = self._backend._lib.HMAC_Init_ex(\n", "issue": "Cannot use BLAKE2b or BLAKE2s with HMAC\nPython 3.6.2 on Ubuntu 17.10\r\nCryptography and dependencies installed via pip in virtualenv\r\ncffi==1.10.0\r\ncryptography==2.0.3\r\npip==9.0.1\r\nsetuptools==36.2.7\r\n\r\nSteps to reproduce:\r\n```\r\nIn [1]: from cryptography.hazmat.backends import default_backend\r\n ...: from cryptography.hazmat.primitives import hashes\r\n ...: from cryptography.hazmat.primitives.hmac import HMAC\r\n ...: \r\n ...: backend = default_backend()\r\n ...: \r\n\r\nIn [2]: hmac = HMAC(b'\\x00'*32, hashes.SHA256(), backend) # just fine\r\n\r\nIn [3]: hmac = HMAC(b'\\x00'*32, hashes.BLAKE2s(digest_size=32), backend)\r\n---------------------------------------------------------------------------\r\nUnsupportedAlgorithm Traceback (most recent call last)\r\n<ipython-input-3-61f273a52c45> in <module>()\r\n----> 1 hmac = HMAC(b'\\x00'*32, hashes.BLAKE2s(digest_size=32), backend)\r\n\r\n~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/primitives/hmac.py in __init__(self, key, algorithm, backend, ctx)\r\n 30 self._key = key\r\n 31 if ctx is None:\r\n---> 32 self._ctx = self._backend.create_hmac_ctx(key, self.algorithm)\r\n 33 else:\r\n 34 self._ctx = ctx\r\n\r\n~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/backend.py in create_hmac_ctx(self, key, algorithm)\r\n 176 \r\n 177 def create_hmac_ctx(self, key, algorithm):\r\n--> 178 return _HMACContext(self, key, algorithm)\r\n 179 \r\n 180 def _build_openssl_digest_name(self, algorithm):\r\n\r\n~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/hmac.py in __init__(self, backend, key, algorithm, ctx)\r\n 32 \"{0} is not a supported hash on this backend.\".format(\r\n 33 algorithm.name),\r\n---> 34 _Reasons.UNSUPPORTED_HASH\r\n 35 )\r\n 36 res = self._backend._lib.HMAC_Init_ex(\r\n\r\nUnsupportedAlgorithm: blake2s is not a supported hash on this backend.\r\n```\r\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n InvalidSignature, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.primitives import constant_time, hashes, mac\n\n\[email protected]_interface(mac.MACContext)\[email protected]_interface(hashes.HashContext)\nclass _HMACContext(object):\n def __init__(self, backend, key, algorithm, ctx=None):\n self._algorithm = algorithm\n self._backend = backend\n\n if ctx is None:\n ctx = self._backend._lib.Cryptography_HMAC_CTX_new()\n self._backend.openssl_assert(ctx != self._backend._ffi.NULL)\n ctx = self._backend._ffi.gc(\n ctx, self._backend._lib.Cryptography_HMAC_CTX_free\n )\n evp_md = self._backend._lib.EVP_get_digestbyname(\n algorithm.name.encode('ascii'))\n if evp_md == self._backend._ffi.NULL:\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n res = self._backend._lib.HMAC_Init_ex(\n ctx, key, len(key), evp_md, self._backend._ffi.NULL\n )\n self._backend.openssl_assert(res != 0)\n\n self._ctx = ctx\n self._key = key\n\n algorithm = utils.read_only_property(\"_algorithm\")\n\n def copy(self):\n copied_ctx = self._backend._lib.Cryptography_HMAC_CTX_new()\n self._backend.openssl_assert(copied_ctx != self._backend._ffi.NULL)\n copied_ctx = self._backend._ffi.gc(\n copied_ctx, self._backend._lib.Cryptography_HMAC_CTX_free\n )\n res = self._backend._lib.HMAC_CTX_copy(copied_ctx, self._ctx)\n self._backend.openssl_assert(res != 0)\n return _HMACContext(\n self._backend, self._key, self.algorithm, ctx=copied_ctx\n )\n\n def update(self, data):\n res = self._backend._lib.HMAC_Update(self._ctx, data, len(data))\n self._backend.openssl_assert(res != 0)\n\n def finalize(self):\n buf = self._backend._ffi.new(\"unsigned char[]\",\n self._backend._lib.EVP_MAX_MD_SIZE)\n outlen = self._backend._ffi.new(\"unsigned int *\")\n res = self._backend._lib.HMAC_Final(self._ctx, buf, outlen)\n self._backend.openssl_assert(res != 0)\n self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size)\n return self._backend._ffi.buffer(buf)[:outlen[0]]\n\n def verify(self, signature):\n digest = self.finalize()\n if not constant_time.bytes_eq(digest, signature):\n raise InvalidSignature(\"Signature did not match digest.\")\n", "path": "src/cryptography/hazmat/backends/openssl/hmac.py"}]} | 1,959 | 245 |
gh_patches_debug_10384 | rasdani/github-patches | git_diff | shuup__shuup-1558 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve the way permissions are managed in admin
Currently, the user has to use a select2 component with a lot of options, and this is super boring, tedious, and time consuming. Can we use a list of checkboxes instead?
</issue>
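A minimal Django sketch of the suggested change — the same `MultipleChoiceField` rendered as a checkbox list rather than a select2-backed multi-select. The choices are placeholders for illustration only:

```python
from django import forms

class ModulePermissionsFormSketch(forms.Form):
    # One checkbox per module instead of a long multi-select.
    modules = forms.MultipleChoiceField(
        choices=[("orders", "Orders"), ("products", "Products")],  # placeholders
        required=False,
        widget=forms.CheckboxSelectMultiple,
    )
```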
<code>
[start of shuup/admin/modules/permission_groups/views/edit.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
5 #
6 # This source code is licensed under the OSL-3.0 license found in the
7 # LICENSE file in the root directory of this source tree.
8 from __future__ import unicode_literals
9
10 from django import forms
11 from django.contrib.auth import get_user_model
12 from django.contrib.auth.models import Group as PermissionGroup
13 from django.utils.encoding import force_text
14 from django.utils.translation import ugettext_lazy as _
15
16 from shuup.admin.forms.fields import Select2MultipleField
17 from shuup.admin.module_registry import get_modules
18 from shuup.admin.utils.permissions import get_permission_object_from_string
19 from shuup.admin.utils.views import CreateOrUpdateView
20
21
22 class PermissionGroupForm(forms.ModelForm):
23 class Meta:
24 model = PermissionGroup
25 exclude = ("permissions",)
26
27 def __init__(self, *args, **kwargs):
28 super(PermissionGroupForm, self).__init__(*args, **kwargs)
29 initial_permissions = self._get_initial_permissions()
30 self.fields["name"].help_text = _("The permission group name.")
31 self.fields["modules"] = forms.MultipleChoiceField(
32 choices=sorted(self._get_module_choices()),
33 initial=self._get_enabled_modules(initial_permissions),
34 required=False,
35 label=_("Module Permissions"),
36 help_text=_(
37 "Select the modules that should be accessible by this permission group. "
38 "Modules with the same permissions as selected modules will be added automatically."
39 )
40 )
41 initial_members = self._get_initial_members()
42 members_field = Select2MultipleField(
43 model=get_user_model(),
44 initial=[member.pk for member in initial_members],
45 required=False,
46 label=_("Members"),
47 help_text=_(
48 "Set the users that belong to this permission group."
49 )
50 )
51 members_field.widget.choices = [(member.pk, force_text(member)) for member in initial_members]
52 self.fields["members"] = members_field
53
54 def _get_module_choices(self):
55 return set((force_text(m.name), force_text(m.name)) for m in get_modules() if m.name != "_Base_")
56
57 def _get_initial_members(self):
58 if self.instance.pk:
59 return self.instance.user_set.all()
60 else:
61 return []
62
63 def _get_initial_permissions(self):
64 permissions = set()
65 if self.instance.pk:
66 for perm in self.instance.permissions.all():
67 name, module, _ = perm.natural_key()
68 permissions.add("%s.%s" % (module, name))
69 return permissions
70
71 def _get_enabled_modules(self, permissions):
72 if not self.instance.pk:
73 return []
74 permissions = set(permissions)
75 modules = []
76 for module in get_modules():
77 # Ignore modules that haven't configured a name
78 if module.name != "_Base_" and set(module.get_required_permissions()).issubset(permissions):
79 modules.append(force_text(module.name))
80 return modules
81
82 def _get_required_permissions(self, modules):
83 permissions = set()
84 for module in [m for m in get_modules() if m.name in modules]:
85 permissions.update(set(module.get_required_permissions()))
86 return permissions
87
88 def clean_members(self):
89 members = self.cleaned_data.get("members", [])
90
91 return get_user_model().objects.filter(pk__in=members).all()
92
93 def clean(self):
94 cleaned_data = super(PermissionGroupForm, self).clean()
95
96 permissions = set()
97 modules = cleaned_data.pop("modules", [])
98 required_permissions = self._get_required_permissions(modules)
99
100 for permission in required_permissions:
101 permissions.add(get_permission_object_from_string(permission))
102
103 cleaned_data["required_permissions"] = permissions
104
105 return cleaned_data
106
107 def save(self):
108 obj = super(PermissionGroupForm, self).save()
109 obj.permissions = set(self.cleaned_data["required_permissions"])
110 obj.user_set = set(self.cleaned_data["members"])
111 return obj
112
113
114 class PermissionGroupEditView(CreateOrUpdateView):
115 model = PermissionGroup
116 form_class = PermissionGroupForm
117 template_name = "shuup/admin/permission_groups/edit.jinja"
118 context_object_name = "permission_group"
119 add_form_errors_as_messages = True
120
[end of shuup/admin/modules/permission_groups/views/edit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shuup/admin/modules/permission_groups/views/edit.py b/shuup/admin/modules/permission_groups/views/edit.py
--- a/shuup/admin/modules/permission_groups/views/edit.py
+++ b/shuup/admin/modules/permission_groups/views/edit.py
@@ -36,7 +36,8 @@
help_text=_(
"Select the modules that should be accessible by this permission group. "
"Modules with the same permissions as selected modules will be added automatically."
- )
+ ),
+ widget=forms.CheckboxSelectMultiple
)
initial_members = self._get_initial_members()
members_field = Select2MultipleField(
| {"golden_diff": "diff --git a/shuup/admin/modules/permission_groups/views/edit.py b/shuup/admin/modules/permission_groups/views/edit.py\n--- a/shuup/admin/modules/permission_groups/views/edit.py\n+++ b/shuup/admin/modules/permission_groups/views/edit.py\n@@ -36,7 +36,8 @@\n help_text=_(\n \"Select the modules that should be accessible by this permission group. \"\n \"Modules with the same permissions as selected modules will be added automatically.\"\n- )\n+ ),\n+ widget=forms.CheckboxSelectMultiple\n )\n initial_members = self._get_initial_members()\n members_field = Select2MultipleField(\n", "issue": "Improve the way permissionas are managed in admin\nCurrently, use has to use a select2 component with a lot of options and this is super boring, tedious and time consuming. Can we use a list of check boxes instead?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom __future__ import unicode_literals\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group as PermissionGroup\nfrom django.utils.encoding import force_text\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shuup.admin.forms.fields import Select2MultipleField\nfrom shuup.admin.module_registry import get_modules\nfrom shuup.admin.utils.permissions import get_permission_object_from_string\nfrom shuup.admin.utils.views import CreateOrUpdateView\n\n\nclass PermissionGroupForm(forms.ModelForm):\n class Meta:\n model = PermissionGroup\n exclude = (\"permissions\",)\n\n def __init__(self, *args, **kwargs):\n super(PermissionGroupForm, self).__init__(*args, **kwargs)\n initial_permissions = self._get_initial_permissions()\n self.fields[\"name\"].help_text = _(\"The permission group name.\")\n self.fields[\"modules\"] = forms.MultipleChoiceField(\n choices=sorted(self._get_module_choices()),\n initial=self._get_enabled_modules(initial_permissions),\n required=False,\n label=_(\"Module Permissions\"),\n help_text=_(\n \"Select the modules that should be accessible by this permission group. 
\"\n \"Modules with the same permissions as selected modules will be added automatically.\"\n )\n )\n initial_members = self._get_initial_members()\n members_field = Select2MultipleField(\n model=get_user_model(),\n initial=[member.pk for member in initial_members],\n required=False,\n label=_(\"Members\"),\n help_text=_(\n \"Set the users that belong to this permission group.\"\n )\n )\n members_field.widget.choices = [(member.pk, force_text(member)) for member in initial_members]\n self.fields[\"members\"] = members_field\n\n def _get_module_choices(self):\n return set((force_text(m.name), force_text(m.name)) for m in get_modules() if m.name != \"_Base_\")\n\n def _get_initial_members(self):\n if self.instance.pk:\n return self.instance.user_set.all()\n else:\n return []\n\n def _get_initial_permissions(self):\n permissions = set()\n if self.instance.pk:\n for perm in self.instance.permissions.all():\n name, module, _ = perm.natural_key()\n permissions.add(\"%s.%s\" % (module, name))\n return permissions\n\n def _get_enabled_modules(self, permissions):\n if not self.instance.pk:\n return []\n permissions = set(permissions)\n modules = []\n for module in get_modules():\n # Ignore modules that haven't configured a name\n if module.name != \"_Base_\" and set(module.get_required_permissions()).issubset(permissions):\n modules.append(force_text(module.name))\n return modules\n\n def _get_required_permissions(self, modules):\n permissions = set()\n for module in [m for m in get_modules() if m.name in modules]:\n permissions.update(set(module.get_required_permissions()))\n return permissions\n\n def clean_members(self):\n members = self.cleaned_data.get(\"members\", [])\n\n return get_user_model().objects.filter(pk__in=members).all()\n\n def clean(self):\n cleaned_data = super(PermissionGroupForm, self).clean()\n\n permissions = set()\n modules = cleaned_data.pop(\"modules\", [])\n required_permissions = self._get_required_permissions(modules)\n\n for permission in required_permissions:\n permissions.add(get_permission_object_from_string(permission))\n\n cleaned_data[\"required_permissions\"] = permissions\n\n return cleaned_data\n\n def save(self):\n obj = super(PermissionGroupForm, self).save()\n obj.permissions = set(self.cleaned_data[\"required_permissions\"])\n obj.user_set = set(self.cleaned_data[\"members\"])\n return obj\n\n\nclass PermissionGroupEditView(CreateOrUpdateView):\n model = PermissionGroup\n form_class = PermissionGroupForm\n template_name = \"shuup/admin/permission_groups/edit.jinja\"\n context_object_name = \"permission_group\"\n add_form_errors_as_messages = True\n", "path": "shuup/admin/modules/permission_groups/views/edit.py"}]} | 1,764 | 140 |
gh_patches_debug_24667 | rasdani/github-patches | git_diff | saleor__saleor-2665 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement product types section in dashboard
Blocked by #2679
</issue>
<code>
[start of saleor/core/__init__.py]
1 from django.conf import settings
2 from django.core.checks import Warning, register
3 from django.utils.translation import pgettext_lazy
4
5 TOKEN_PATTERN = ('(?P<token>[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}'
6 '-[0-9a-z]{12})')
7
8
9 @register()
10 def check_session_caching(app_configs, **kwargs): # pragma: no cover
11 errors = []
12 cached_engines = {
13 'django.contrib.sessions.backends.cache',
14 'django.contrib.sessions.backends.cached_db'}
15 if ('locmem' in settings.CACHES['default']['BACKEND'] and
16 settings.SESSION_ENGINE in cached_engines):
17 errors.append(
18 Warning(
19 'Session caching cannot work with locmem backend',
20 'User sessions need to be globally shared, use a cache server'
21 ' like Redis.',
22 'saleor.W001'))
23 return errors
24
25
26 class TaxRateType:
27 ACCOMODATION = 'accomodation'
28 ADMISSION_TO_CULTURAL_EVENTS = 'admission to cultural events'
29 ADMISSION_TO_ENTERAINMENT_EVENTS = 'admission to entertainment events'
30 ADMISSION_TO_SPORTING_EVENTS = 'admission to sporting events'
31 ADVERTISING = 'advertising'
32 AGRICULTURAL_SUPPLIES = 'agricultural supplies'
33 BABY_FOODSTUFFS = 'baby foodstuffs'
34 BIKES = 'bikes'
35 BOOKS = 'books'
36 CHILDRENDS_CLOTHING = 'childrens clothing'
37 DOMESTIC_FUEL = 'domestic fuel'
38 DOMESTIC_SERVICES = 'domestic services'
39 E_BOOKS = 'e-books'
40 FOODSTUFFS = 'foodstuffs'
41 HOTELS = 'hotels'
42 MEDICAL = 'medical'
43 NEWSPAPERS = 'newspapers'
44 PASSENGER_TRANSPORT = 'passenger transport'
45 PHARMACEUTICALS = 'pharmaceuticals'
46 PROPERTY_RENOVATIONS = 'property renovations'
47 RESTAURANTS = 'restaurants'
48 SOCIAL_HOUSING = 'social housing'
49 STANDARD = 'standard'
50 WATER = 'water'
51 WINE = 'wine'
52
53 CHOICES = (
54 (ACCOMODATION, pgettext_lazy('VAT rate type', 'accommodation')),
55 (ADMISSION_TO_CULTURAL_EVENTS, pgettext_lazy(
56 'VAT rate type', 'admission to cultural events')),
57 (ADMISSION_TO_ENTERAINMENT_EVENTS, pgettext_lazy(
58 'VAT rate type', 'admission to entertainment events')),
59 (ADMISSION_TO_SPORTING_EVENTS, pgettext_lazy(
60 'VAT rate type', 'admission to sporting events')),
61 (ADVERTISING, pgettext_lazy('VAT rate type', 'advertising')),
62 (AGRICULTURAL_SUPPLIES, pgettext_lazy(
63 'VAT rate type', 'agricultural supplies')),
64 (BABY_FOODSTUFFS, pgettext_lazy('VAT rate type', 'baby foodstuffs')),
65 (BIKES, pgettext_lazy('VAT rate type', 'bikes')),
66 (BOOKS, pgettext_lazy('VAT rate type', 'books')),
67 (CHILDRENDS_CLOTHING, pgettext_lazy(
68 'VAT rate type', 'childrens clothing')),
69 (DOMESTIC_FUEL, pgettext_lazy('VAT rate type', 'domestic fuel')),
70 (DOMESTIC_SERVICES, pgettext_lazy(
71 'VAT rate type', 'domestic services')),
72 (E_BOOKS, pgettext_lazy('VAT rate type', 'e-books')),
73 (FOODSTUFFS, pgettext_lazy('VAT rate type', 'foodstuffs')),
74 (HOTELS, pgettext_lazy('VAT rate type', 'hotels')),
75 (MEDICAL, pgettext_lazy('VAT rate type', 'medical')),
76 (NEWSPAPERS, pgettext_lazy('VAT rate type', 'newspapers')),
77 (PASSENGER_TRANSPORT, pgettext_lazy(
78 'VAT rate type', 'passenger transport')),
79 (PHARMACEUTICALS, pgettext_lazy(
80 'VAT rate type', 'pharmaceuticals')),
81 (PROPERTY_RENOVATIONS, pgettext_lazy(
82 'VAT rate type', 'property renovations')),
83 (RESTAURANTS, pgettext_lazy('VAT rate type', 'restaurants')),
84 (SOCIAL_HOUSING, pgettext_lazy('VAT rate type', 'social housing')),
85 (STANDARD, pgettext_lazy('VAT rate type', 'standard')),
86 (WATER, pgettext_lazy('VAT rate type', 'water')))
87
[end of saleor/core/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/core/__init__.py b/saleor/core/__init__.py
--- a/saleor/core/__init__.py
+++ b/saleor/core/__init__.py
@@ -26,7 +26,7 @@
class TaxRateType:
ACCOMODATION = 'accomodation'
ADMISSION_TO_CULTURAL_EVENTS = 'admission to cultural events'
- ADMISSION_TO_ENTERAINMENT_EVENTS = 'admission to entertainment events'
+ ADMISSION_TO_ENTERTAINMENT_EVENTS = 'admission to entertainment events'
ADMISSION_TO_SPORTING_EVENTS = 'admission to sporting events'
ADVERTISING = 'advertising'
AGRICULTURAL_SUPPLIES = 'agricultural supplies'
@@ -54,7 +54,7 @@
(ACCOMODATION, pgettext_lazy('VAT rate type', 'accommodation')),
(ADMISSION_TO_CULTURAL_EVENTS, pgettext_lazy(
'VAT rate type', 'admission to cultural events')),
- (ADMISSION_TO_ENTERAINMENT_EVENTS, pgettext_lazy(
+ (ADMISSION_TO_ENTERTAINMENT_EVENTS, pgettext_lazy(
'VAT rate type', 'admission to entertainment events')),
(ADMISSION_TO_SPORTING_EVENTS, pgettext_lazy(
'VAT rate type', 'admission to sporting events')),
| {"golden_diff": "diff --git a/saleor/core/__init__.py b/saleor/core/__init__.py\n--- a/saleor/core/__init__.py\n+++ b/saleor/core/__init__.py\n@@ -26,7 +26,7 @@\n class TaxRateType:\n ACCOMODATION = 'accomodation'\n ADMISSION_TO_CULTURAL_EVENTS = 'admission to cultural events'\n- ADMISSION_TO_ENTERAINMENT_EVENTS = 'admission to entertainment events'\n+ ADMISSION_TO_ENTERTAINMENT_EVENTS = 'admission to entertainment events'\n ADMISSION_TO_SPORTING_EVENTS = 'admission to sporting events'\n ADVERTISING = 'advertising'\n AGRICULTURAL_SUPPLIES = 'agricultural supplies'\n@@ -54,7 +54,7 @@\n (ACCOMODATION, pgettext_lazy('VAT rate type', 'accommodation')),\n (ADMISSION_TO_CULTURAL_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to cultural events')),\n- (ADMISSION_TO_ENTERAINMENT_EVENTS, pgettext_lazy(\n+ (ADMISSION_TO_ENTERTAINMENT_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to entertainment events')),\n (ADMISSION_TO_SPORTING_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to sporting events')),\n", "issue": "Implement product types section in dashboard\nBlocked by #2679 \n", "before_files": [{"content": "from django.conf import settings\nfrom django.core.checks import Warning, register\nfrom django.utils.translation import pgettext_lazy\n\nTOKEN_PATTERN = ('(?P<token>[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}'\n '-[0-9a-z]{12})')\n\n\n@register()\ndef check_session_caching(app_configs, **kwargs): # pragma: no cover\n errors = []\n cached_engines = {\n 'django.contrib.sessions.backends.cache',\n 'django.contrib.sessions.backends.cached_db'}\n if ('locmem' in settings.CACHES['default']['BACKEND'] and\n settings.SESSION_ENGINE in cached_engines):\n errors.append(\n Warning(\n 'Session caching cannot work with locmem backend',\n 'User sessions need to be globally shared, use a cache server'\n ' like Redis.',\n 'saleor.W001'))\n return errors\n\n\nclass TaxRateType:\n ACCOMODATION = 'accomodation'\n ADMISSION_TO_CULTURAL_EVENTS = 'admission to cultural events'\n ADMISSION_TO_ENTERAINMENT_EVENTS = 'admission to entertainment events'\n ADMISSION_TO_SPORTING_EVENTS = 'admission to sporting events'\n ADVERTISING = 'advertising'\n AGRICULTURAL_SUPPLIES = 'agricultural supplies'\n BABY_FOODSTUFFS = 'baby foodstuffs'\n BIKES = 'bikes'\n BOOKS = 'books'\n CHILDRENDS_CLOTHING = 'childrens clothing'\n DOMESTIC_FUEL = 'domestic fuel'\n DOMESTIC_SERVICES = 'domestic services'\n E_BOOKS = 'e-books'\n FOODSTUFFS = 'foodstuffs'\n HOTELS = 'hotels'\n MEDICAL = 'medical'\n NEWSPAPERS = 'newspapers'\n PASSENGER_TRANSPORT = 'passenger transport'\n PHARMACEUTICALS = 'pharmaceuticals'\n PROPERTY_RENOVATIONS = 'property renovations'\n RESTAURANTS = 'restaurants'\n SOCIAL_HOUSING = 'social housing'\n STANDARD = 'standard'\n WATER = 'water'\n WINE = 'wine'\n\n CHOICES = (\n (ACCOMODATION, pgettext_lazy('VAT rate type', 'accommodation')),\n (ADMISSION_TO_CULTURAL_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to cultural events')),\n (ADMISSION_TO_ENTERAINMENT_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to entertainment events')),\n (ADMISSION_TO_SPORTING_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to sporting events')),\n (ADVERTISING, pgettext_lazy('VAT rate type', 'advertising')),\n (AGRICULTURAL_SUPPLIES, pgettext_lazy(\n 'VAT rate type', 'agricultural supplies')),\n (BABY_FOODSTUFFS, pgettext_lazy('VAT rate type', 'baby foodstuffs')),\n (BIKES, pgettext_lazy('VAT rate type', 'bikes')),\n (BOOKS, pgettext_lazy('VAT rate type', 'books')),\n (CHILDRENDS_CLOTHING, 
pgettext_lazy(\n 'VAT rate type', 'childrens clothing')),\n (DOMESTIC_FUEL, pgettext_lazy('VAT rate type', 'domestic fuel')),\n (DOMESTIC_SERVICES, pgettext_lazy(\n 'VAT rate type', 'domestic services')),\n (E_BOOKS, pgettext_lazy('VAT rate type', 'e-books')),\n (FOODSTUFFS, pgettext_lazy('VAT rate type', 'foodstuffs')),\n (HOTELS, pgettext_lazy('VAT rate type', 'hotels')),\n (MEDICAL, pgettext_lazy('VAT rate type', 'medical')),\n (NEWSPAPERS, pgettext_lazy('VAT rate type', 'newspapers')),\n (PASSENGER_TRANSPORT, pgettext_lazy(\n 'VAT rate type', 'passenger transport')),\n (PHARMACEUTICALS, pgettext_lazy(\n 'VAT rate type', 'pharmaceuticals')),\n (PROPERTY_RENOVATIONS, pgettext_lazy(\n 'VAT rate type', 'property renovations')),\n (RESTAURANTS, pgettext_lazy('VAT rate type', 'restaurants')),\n (SOCIAL_HOUSING, pgettext_lazy('VAT rate type', 'social housing')),\n (STANDARD, pgettext_lazy('VAT rate type', 'standard')),\n (WATER, pgettext_lazy('VAT rate type', 'water')))\n", "path": "saleor/core/__init__.py"}]} | 1,753 | 302 |
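The golden diff above is a pure identifier rename: the misspelled constant `ADMISSION_TO_ENTERAINMENT_EVENTS` becomes `ADMISSION_TO_ENTERTAINMENT_EVENTS` both where it is defined and where `CHOICES` references it. A quick sanity check of the patched module could look like the sketch below; it is illustrative only and assumes a saleor checkout with the patch applied is importable, which is not part of the dataset row.

from saleor.core import TaxRateType

# The corrected name exists and its value is wired into CHOICES.
assert hasattr(TaxRateType, 'ADMISSION_TO_ENTERTAINMENT_EVENTS')
assert TaxRateType.ADMISSION_TO_ENTERTAINMENT_EVENTS in dict(TaxRateType.CHOICES)
# The misspelled identifier is gone, so stale references now fail loudly.
assert not hasattr(TaxRateType, 'ADMISSION_TO_ENTERAINMENT_EVENTS')

Because the constant is part of the class's public surface, any external code still importing the old name fails at attribute lookup instead of silently carrying the typo forward, which is the desired failure mode for a spelling fix.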
gh_patches_debug_34147 | rasdani/github-patches | git_diff | kivy__python-for-android-1410 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Comprehensive list of broken recipes
When working on https://github.com/kivy/python-for-android/pull/1401 I realised we still have some broken recipes in the tree at least for python3crystax.
Even though we don't want to have red builds for things that were already broken, we still want to have a clear status of what's broken and what's not.
Basically the idea is to try to compile every single recipes and add the broken ones in the ignore list (`BROKEN_RECIPES`) from #1401. That way we can track and fix them later on meanwhile keeping a green build.
I would like to address it in this task. Basically the output of the task should be a PR making the `BROKEN_RECIPES` list comprehensive. With bonus points for creating an issue per broken recipes :smile:
</issue>
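The plan in this issue is to keep CI green by skipping, per target Python, every recipe known to fail, which is what the `BROKEN_RECIPES` mapping in the file below enables. A minimal sketch of how such a filter could consume it follows; the function name `recipes_to_build` and the `all_recipes` argument are assumptions for illustration (and it presumes the repository root is on sys.path), not code from the repository.

from ci.constants import BROKEN_RECIPES, CORE_RECIPES, TargetPython

def recipes_to_build(all_recipes, target=TargetPython.python3crystax):
    """Recipes worth attempting: drop known-broken and already-built ones."""
    return sorted(set(all_recipes) - BROKEN_RECIPES[target] - CORE_RECIPES)

With the file as shown, recipes_to_build(['kivy', 'libglob', 'regex'], TargetPython.python2) returns ['libglob', 'regex'] (only 'kivy' is skipped, as an already-built core recipe), while for python3crystax all three are dropped.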
<code>
[start of ci/constants.py]
1 from enum import Enum
2
3
4 class TargetPython(Enum):
5 python2 = 0
6 python3crystax = 1
7
8
9 # recipes that currently break the build
10 # a recipe could be broken for a target Python and not for the other,
11 # hence we're maintaining one list per Python target
12 BROKEN_RECIPES_PYTHON2 = set([])
13 BROKEN_RECIPES_PYTHON3_CRYSTAX = set([
14 # not yet python3crystax compatible
15 'apsw', 'atom', 'boost', 'brokenrecipe', 'cdecimal', 'cherrypy',
16 'coverage', 'dateutil', 'enaml', 'ethash', 'kiwisolver', 'libgeos',
17 'libnacl', 'libsodium', 'libtorrent', 'libtribler', 'libzbar', 'libzmq',
18 'm2crypto', 'mysqldb', 'ndghttpsclient', 'pil', 'pycrypto', 'pyethereum',
19 'pygame', 'pyleveldb', 'pyproj', 'pyzmq', 'regex', 'shapely',
20 'simple-crypt', 'twsisted', 'vispy', 'websocket-client', 'zbar',
21 'zeroconf', 'zope',
22 # https://github.com/kivy/python-for-android/issues/550
23 'audiostream',
24 # enum34 is not compatible with Python 3.6 standard library
25 # https://stackoverflow.com/a/45716067/185510
26 'enum34',
27 # https://github.com/kivy/python-for-android/issues/1398
28 'ifaddrs',
29 # https://github.com/kivy/python-for-android/issues/1399
30 'libglob',
31 # cannot find -lcrystax
32 'cffi', 'pycryptodome', 'pymuk', 'secp256k1',
33 # https://github.com/kivy/python-for-android/issues/1404
34 'cryptography',
35 # https://github.com/kivy/python-for-android/issues/1294
36 'ffmpeg', 'ffpyplayer',
37 # https://github.com/kivy/python-for-android/pull/1307 ?
38 'gevent',
39 'icu',
40 # https://github.com/kivy/python-for-android/issues/1354
41 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',
42 'kivy',
43 # https://github.com/kivy/python-for-android/issues/1405
44 'libpq', 'psycopg2',
45 'netifaces',
46 # https://github.com/kivy/python-for-android/issues/1315 ?
47 'opencv',
48 'protobuf_cpp',
49 # most likely some setup in the Docker container, because it works in host
50 'pyjnius', 'pyopenal',
51 # SyntaxError: invalid syntax (Python2)
52 'storm',
53 'vlc',
54 ])
55 BROKEN_RECIPES = {
56 TargetPython.python2: BROKEN_RECIPES_PYTHON2,
57 TargetPython.python3crystax: BROKEN_RECIPES_PYTHON3_CRYSTAX,
58 }
59 # recipes that were already built will be skipped
60 CORE_RECIPES = set([
61 'pyjnius', 'kivy', 'openssl', 'requests', 'sqlite3', 'setuptools',
62 'numpy', 'android', 'python2',
63 ])
64
[end of ci/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ci/constants.py b/ci/constants.py
--- a/ci/constants.py
+++ b/ci/constants.py
@@ -9,7 +9,57 @@
# recipes that currently break the build
# a recipe could be broken for a target Python and not for the other,
# hence we're maintaining one list per Python target
-BROKEN_RECIPES_PYTHON2 = set([])
+BROKEN_RECIPES_PYTHON2 = set([
+ # pythonhelpers.h:12:18: fatal error: string: No such file or directory
+ 'atom',
+ # https://github.com/kivy/python-for-android/issues/550
+ 'audiostream',
+ 'brokenrecipe',
+ # https://github.com/kivy/python-for-android/issues/1409
+ 'enaml',
+ 'evdev',
+ # distutils.errors.DistutilsError
+ # Could not find suitable distribution for Requirement.parse('cython')
+ 'ffpyplayer',
+ 'flask',
+ 'groestlcoin_hash',
+ 'hostpython3crystax',
+ # https://github.com/kivy/python-for-android/issues/1398
+ 'ifaddrs',
+ # https://github.com/kivy/python-for-android/issues/1354
+ 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',
+ 'kiwisolver',
+ # system dependencies autoconf, libtool
+ 'libexpat',
+ 'libgeos',
+ # https://github.com/kivy/python-for-android/issues/1399
+ 'libglob',
+ # system dependencies cmake and compile error
+ 'libmysqlclient',
+ 'libsecp256k1',
+ 'libtribler',
+ # system dependencies gettext, pkg-config
+ 'libzbar',
+ 'ndghttpsclient',
+ 'm2crypto',
+ 'netifaces',
+ 'Pillow',
+ # https://github.com/kivy/python-for-android/issues/1405
+ 'psycopg2',
+ 'pygame',
+ # most likely some setup in the Docker container, because it works in host
+ 'pyjnius', 'pyopenal',
+ 'pyproj',
+ 'pysdl2',
+ 'pyzmq',
+ 'secp256k1',
+ 'shapely',
+ 'twisted',
+ 'vlc',
+ 'websocket-client',
+ 'zeroconf',
+ 'zope',
+])
BROKEN_RECIPES_PYTHON3_CRYSTAX = set([
# not yet python3crystax compatible
'apsw', 'atom', 'boost', 'brokenrecipe', 'cdecimal', 'cherrypy',
@@ -39,7 +89,8 @@
'icu',
# https://github.com/kivy/python-for-android/issues/1354
'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',
- 'kivy',
+ # system dependencies autoconf, libtool
+ 'libexpat',
# https://github.com/kivy/python-for-android/issues/1405
'libpq', 'psycopg2',
'netifaces',
| {"golden_diff": "diff --git a/ci/constants.py b/ci/constants.py\n--- a/ci/constants.py\n+++ b/ci/constants.py\n@@ -9,7 +9,57 @@\n # recipes that currently break the build\n # a recipe could be broken for a target Python and not for the other,\n # hence we're maintaining one list per Python target\n-BROKEN_RECIPES_PYTHON2 = set([])\n+BROKEN_RECIPES_PYTHON2 = set([\n+ # pythonhelpers.h:12:18: fatal error: string: No such file or directory\n+ 'atom',\n+ # https://github.com/kivy/python-for-android/issues/550\n+ 'audiostream',\n+ 'brokenrecipe',\n+ # https://github.com/kivy/python-for-android/issues/1409\n+ 'enaml',\n+ 'evdev',\n+ # distutils.errors.DistutilsError\n+ # Could not find suitable distribution for Requirement.parse('cython')\n+ 'ffpyplayer',\n+ 'flask',\n+ 'groestlcoin_hash',\n+ 'hostpython3crystax',\n+ # https://github.com/kivy/python-for-android/issues/1398\n+ 'ifaddrs',\n+ # https://github.com/kivy/python-for-android/issues/1354\n+ 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',\n+ 'kiwisolver',\n+ # system dependencies autoconf, libtool\n+ 'libexpat',\n+ 'libgeos',\n+ # https://github.com/kivy/python-for-android/issues/1399\n+ 'libglob',\n+ # system dependencies cmake and compile error\n+ 'libmysqlclient',\n+ 'libsecp256k1',\n+ 'libtribler',\n+ # system dependencies gettext, pkg-config\n+ 'libzbar',\n+ 'ndghttpsclient',\n+ 'm2crypto',\n+ 'netifaces',\n+ 'Pillow',\n+ # https://github.com/kivy/python-for-android/issues/1405\n+ 'psycopg2',\n+ 'pygame',\n+ # most likely some setup in the Docker container, because it works in host\n+ 'pyjnius', 'pyopenal',\n+ 'pyproj',\n+ 'pysdl2',\n+ 'pyzmq',\n+ 'secp256k1',\n+ 'shapely',\n+ 'twisted',\n+ 'vlc',\n+ 'websocket-client',\n+ 'zeroconf',\n+ 'zope',\n+])\n BROKEN_RECIPES_PYTHON3_CRYSTAX = set([\n # not yet python3crystax compatible\n 'apsw', 'atom', 'boost', 'brokenrecipe', 'cdecimal', 'cherrypy',\n@@ -39,7 +89,8 @@\n 'icu',\n # https://github.com/kivy/python-for-android/issues/1354\n 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',\n- 'kivy',\n+ # system dependencies autoconf, libtool\n+ 'libexpat',\n # https://github.com/kivy/python-for-android/issues/1405\n 'libpq', 'psycopg2',\n 'netifaces',\n", "issue": "Comprehensive list of broken recipes\nWhen working on https://github.com/kivy/python-for-android/pull/1401 I realised we still have some broken recipes in the tree at least for python3crystax.\r\nEven though we don't want to have red builds for things that were already broken, we still want to have a clear status of what's broken and what's not.\r\nBasically the idea is to try to compile every single recipes and add the broken ones in the ignore list (`BROKEN_RECIPES`) from #1401. That way we can track and fix them later on meanwhile keeping a green build.\r\nI would like to address it in this task. Basically the output of the task should be a PR making the `BROKEN_RECIPES` list comprehensive. With bonus points for creating an issue per broken recipes :smile: \r\n\n", "before_files": [{"content": "from enum import Enum\n\n\nclass TargetPython(Enum):\n python2 = 0\n python3crystax = 1\n\n\n# recipes that currently break the build\n# a recipe could be broken for a target Python and not for the other,\n# hence we're maintaining one list per Python target\nBROKEN_RECIPES_PYTHON2 = set([])\nBROKEN_RECIPES_PYTHON3_CRYSTAX = set([\n # not yet python3crystax compatible\n 'apsw', 'atom', 'boost', 'brokenrecipe', 'cdecimal', 'cherrypy',\n 'coverage', 'dateutil', 'enaml', 'ethash', 'kiwisolver', 'libgeos',\n 'libnacl', 'libsodium', 'libtorrent', 'libtribler', 'libzbar', 'libzmq',\n 'm2crypto', 'mysqldb', 'ndghttpsclient', 'pil', 'pycrypto', 'pyethereum',\n 'pygame', 'pyleveldb', 'pyproj', 'pyzmq', 'regex', 'shapely',\n 'simple-crypt', 'twsisted', 'vispy', 'websocket-client', 'zbar',\n 'zeroconf', 'zope',\n # https://github.com/kivy/python-for-android/issues/550\n 'audiostream',\n # enum34 is not compatible with Python 3.6 standard library\n # https://stackoverflow.com/a/45716067/185510\n 'enum34',\n # https://github.com/kivy/python-for-android/issues/1398\n 'ifaddrs',\n # https://github.com/kivy/python-for-android/issues/1399\n 'libglob',\n # cannot find -lcrystax\n 'cffi', 'pycryptodome', 'pymuk', 'secp256k1',\n # https://github.com/kivy/python-for-android/issues/1404\n 'cryptography',\n # https://github.com/kivy/python-for-android/issues/1294\n 'ffmpeg', 'ffpyplayer',\n # https://github.com/kivy/python-for-android/pull/1307 ?\n 'gevent',\n 'icu',\n # https://github.com/kivy/python-for-android/issues/1354\n 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',\n 'kivy',\n # https://github.com/kivy/python-for-android/issues/1405\n 'libpq', 'psycopg2',\n 'netifaces',\n # https://github.com/kivy/python-for-android/issues/1315 ?\n 'opencv',\n 'protobuf_cpp',\n # most likely some setup in the Docker container, because it works in host\n 'pyjnius', 'pyopenal',\n # SyntaxError: invalid syntax (Python2)\n 'storm',\n 'vlc',\n])\nBROKEN_RECIPES = {\n TargetPython.python2: BROKEN_RECIPES_PYTHON2,\n TargetPython.python3crystax: BROKEN_RECIPES_PYTHON3_CRYSTAX,\n}\n# recipes that were already built will be skipped\nCORE_RECIPES = set([\n 'pyjnius', 'kivy', 'openssl', 'requests', 'sqlite3', 'setuptools',\n 'numpy', 'android', 'python2',\n])\n", "path": "ci/constants.py"}]} | 1,587 | 753