Dataset columns, with the value statistics reported by the dataset viewer (string columns list minimum and maximum value lengths; integer columns list minimum and maximum values):

problem_id: string, lengths 18 to 22
source: string, 1 distinct value
task_type: string, 1 distinct value
in_source_id: string, lengths 13 to 58
prompt: string, lengths 1.71k to 9.01k
golden_diff: string, lengths 151 to 4.94k
verification_info: string, lengths 465 to 11.3k
num_tokens_prompt: int64, 557 to 2.05k
num_tokens_diff: int64, 48 to 1.02k
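The rows below are flattened records from this dataset, one field per block in the column order above. As an illustrative sketch (assuming the data is published on the Hugging Face Hub under the ID shown in the source column, "rasdani/github-patches", with a train split; neither detail is confirmed by this dump), the table could be loaded and inspected with the datasets library:

```python
# Sketch only: the Hub dataset ID and the split name are assumptions taken
# from the "source" column of this dump, not verified against the Hub.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])
print(row["prompt"][:300])        # issue text plus the partial code base
print(row["golden_diff"][:300])   # reference unified diff for the fix
```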
problem_id: gh_patches_debug_15434
source: rasdani/github-patches
task_type: git_diff
in_source_id: meltano__meltano-6710
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Run Pytest tests in parallel https://meltano.slack.com/archives/CKHP6G5V4/p1653508630941889 Looks like pytest doesn't take 45 minutes, not sure why I though it was taking that long before? Maybe on gitlab CI updated the title to reflect that it takes ~5-10 minutes to run pytest today. 1. Be sure https://github.com/meltano/meltano/pull/6155/files#diff-65c008e1065903299dfa0da25e1f18d0f87f387867ccbbf2b0888eb138bdb255L242 is implemented (This uses a seperate folder for each Test in your temp directory instead of the same folder over and over again) 1. `poetry add -D pytest-xdist==1.*` 1. `poetry run pytest -n 20` 1. Watch tests run extremely quickly There's still a large number of failures here as some tests depend on the other I believe? Not exactly sure what causes the failures, but it's clear they weren't all designed with parallel testing in mind. The gain is big enough it may be worth the time to figure out what causes the failures. </issue> <code> [start of noxfile.py] 1 """Nox configuration.""" 2 3 from __future__ import annotations 4 5 import os 6 import sys 7 from pathlib import Path 8 from random import randint 9 from textwrap import dedent 10 11 try: 12 from nox_poetry import Session 13 from nox_poetry import session as nox_session 14 except ImportError: 15 message = f"""\ 16 Nox failed to import the 'nox-poetry' package. 17 Please install it using the following command: 18 {sys.executable} -m pip install nox-poetry""" 19 raise SystemExit(dedent(message)) from None 20 21 22 package = "meltano" 23 python_versions = ["3.10", "3.9", "3.8", "3.7"] 24 main_python_version = "3.9" 25 locations = "src", "tests", "noxfile.py" 26 27 28 @nox_session(python=python_versions) 29 def tests(session: Session) -> None: 30 """Execute pytest tests and compute coverage. 31 32 Args: 33 session: Nox session. 34 """ 35 backend_db = os.environ.get("PYTEST_BACKEND", "sqlite") 36 37 if backend_db == "mssql": 38 session.install(".[mssql]") 39 else: 40 session.install(".") 41 42 session.install( 43 "coverage[toml]", 44 "freezegun", 45 "mock", 46 "pytest", 47 "pytest-asyncio", 48 "pytest-docker", 49 "pytest-order", 50 "pytest-randomly", 51 "requests-mock", 52 ) 53 54 try: 55 session.run( 56 "coverage", 57 "run", 58 "--parallel", 59 "-m", 60 "pytest", 61 f"--randomly-seed={randint(0, 2**32-1)}", # noqa: S311, WPS432 62 *session.posargs, 63 env={"NOX_CURRENT_SESSION": "tests"}, 64 ) 65 finally: 66 if session.interactive: 67 session.notify("coverage", posargs=[]) 68 69 70 @nox_session(python=main_python_version) 71 def coverage(session: Session) -> None: 72 """Upload coverage data. 73 74 Args: 75 session: Nox session. 76 """ 77 args = session.posargs or ["report"] 78 79 session.install("coverage[toml]") 80 81 if not session.posargs and any(Path().glob(".coverage.*")): 82 session.run("coverage", "combine") 83 84 session.run("coverage", *args) 85 86 87 @nox_session(python=main_python_version) 88 def mypy(session: Session) -> None: 89 """Run mypy type checking. 90 91 Args: 92 session: Nox session. 93 """ 94 args = session.posargs or ["src/meltano"] 95 96 session.install(".") 97 session.install( 98 "mypy", 99 "sqlalchemy2-stubs", 100 "types-requests", 101 ) 102 session.run("mypy", *args) 103 [end of noxfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/noxfile.py b/noxfile.py --- a/noxfile.py +++ b/noxfile.py @@ -40,23 +40,20 @@ session.install(".") session.install( - "coverage[toml]", "freezegun", "mock", "pytest", "pytest-asyncio", + "pytest-cov", "pytest-docker", "pytest-order", "pytest-randomly", + "pytest-xdist", "requests-mock", ) try: session.run( - "coverage", - "run", - "--parallel", - "-m", "pytest", f"--randomly-seed={randint(0, 2**32-1)}", # noqa: S311, WPS432 *session.posargs,
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -40,23 +40,20 @@\n session.install(\".\")\n \n session.install(\n- \"coverage[toml]\",\n \"freezegun\",\n \"mock\",\n \"pytest\",\n \"pytest-asyncio\",\n+ \"pytest-cov\",\n \"pytest-docker\",\n \"pytest-order\",\n \"pytest-randomly\",\n+ \"pytest-xdist\",\n \"requests-mock\",\n )\n \n try:\n session.run(\n- \"coverage\",\n- \"run\",\n- \"--parallel\",\n- \"-m\",\n \"pytest\",\n f\"--randomly-seed={randint(0, 2**32-1)}\", # noqa: S311, WPS432\n *session.posargs,\n", "issue": "Run Pytest tests in parallel\nhttps://meltano.slack.com/archives/CKHP6G5V4/p1653508630941889\r\n\r\nLooks like pytest doesn't take 45 minutes, not sure why I though it was taking that long before? Maybe on gitlab CI updated the title to reflect that it takes ~5-10 minutes to run pytest today. \r\n\r\n1. Be sure https://github.com/meltano/meltano/pull/6155/files#diff-65c008e1065903299dfa0da25e1f18d0f87f387867ccbbf2b0888eb138bdb255L242 is implemented (This uses a seperate folder for each Test in your temp directory instead of the same folder over and over again)\r\n1. `poetry add -D pytest-xdist==1.*`\r\n1. `poetry run pytest -n 20` \r\n1. Watch tests run extremely quickly\r\n\r\nThere's still a large number of failures here as some tests depend on the other I believe? Not exactly sure what causes the failures, but it's clear they weren't all designed with parallel testing in mind. The gain is big enough it may be worth the time to figure out what causes the failures. \r\n\r\n\n", "before_files": [{"content": "\"\"\"Nox configuration.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom pathlib import Path\nfrom random import randint\nfrom textwrap import dedent\n\ntry:\n from nox_poetry import Session\n from nox_poetry import session as nox_session\nexcept ImportError:\n message = f\"\"\"\\\n Nox failed to import the 'nox-poetry' package.\n Please install it using the following command:\n {sys.executable} -m pip install nox-poetry\"\"\"\n raise SystemExit(dedent(message)) from None\n\n\npackage = \"meltano\"\npython_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"]\nmain_python_version = \"3.9\"\nlocations = \"src\", \"tests\", \"noxfile.py\"\n\n\n@nox_session(python=python_versions)\ndef tests(session: Session) -> None:\n \"\"\"Execute pytest tests and compute coverage.\n\n Args:\n session: Nox session.\n \"\"\"\n backend_db = os.environ.get(\"PYTEST_BACKEND\", \"sqlite\")\n\n if backend_db == \"mssql\":\n session.install(\".[mssql]\")\n else:\n session.install(\".\")\n\n session.install(\n \"coverage[toml]\",\n \"freezegun\",\n \"mock\",\n \"pytest\",\n \"pytest-asyncio\",\n \"pytest-docker\",\n \"pytest-order\",\n \"pytest-randomly\",\n \"requests-mock\",\n )\n\n try:\n session.run(\n \"coverage\",\n \"run\",\n \"--parallel\",\n \"-m\",\n \"pytest\",\n f\"--randomly-seed={randint(0, 2**32-1)}\", # noqa: S311, WPS432\n *session.posargs,\n env={\"NOX_CURRENT_SESSION\": \"tests\"},\n )\n finally:\n if session.interactive:\n session.notify(\"coverage\", posargs=[])\n\n\n@nox_session(python=main_python_version)\ndef coverage(session: Session) -> None:\n \"\"\"Upload coverage data.\n\n Args:\n session: Nox session.\n \"\"\"\n args = session.posargs or [\"report\"]\n\n session.install(\"coverage[toml]\")\n\n if not session.posargs and any(Path().glob(\".coverage.*\")):\n session.run(\"coverage\", \"combine\")\n\n session.run(\"coverage\", *args)\n\n\n@nox_session(python=main_python_version)\ndef mypy(session: 
Session) -> None:\n \"\"\"Run mypy type checking.\n\n Args:\n session: Nox session.\n \"\"\"\n args = session.posargs or [\"src/meltano\"]\n\n session.install(\".\")\n session.install(\n \"mypy\",\n \"sqlalchemy2-stubs\",\n \"types-requests\",\n )\n session.run(\"mypy\", *args)\n", "path": "noxfile.py"}]}
num_tokens_prompt: 1,652
num_tokens_diff: 197
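Every prompt in these records ends by asking for a single unified diff that can be applied with git apply, and the golden_diff field holds the reference patch (for the record above, the meltano noxfile change that swaps the coverage-run wrapper for pytest-cov and adds pytest-xdist). A minimal, hypothetical sketch of that apply check, with the repository handling and helper name invented here purely for illustration, could look like this:

```python
# Hypothetical helper: repo_dir must already contain a checkout of the target
# repository at the commit the patch was written against.
import os
import subprocess
import tempfile


def patch_applies(repo_dir: str, patch_text: str) -> bool:
    """Return True if `git apply --check` accepts the patch inside repo_dir."""
    with tempfile.NamedTemporaryFile("w", suffix=".diff", delete=False) as f:
        f.write(patch_text)
        patch_path = f.name
    try:
        result = subprocess.run(
            ["git", "apply", "--check", patch_path],
            cwd=repo_dir,
            capture_output=True,
            text=True,
        )
        return result.returncode == 0
    finally:
        os.unlink(patch_path)
```

The actual scoring harness presumably relies on the verification_info payload (which repeats the golden diff, the issue text, and the pre-patch file contents), but that harness is not part of this dump, so the snippet above only stands in for the git apply step the prompts describe.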
problem_id: gh_patches_debug_18851
source: rasdani/github-patches
task_type: git_diff
in_source_id: alltheplaces__alltheplaces-8363
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Problems with Spiders built on YextSpider I've noticed a few of the spiders whose results I was using in my OSM tool have recently stopped returning any results, including five_guys_de_es_fr_gb.py , matalan_gb.py , and three_gb.py . A common feature of these spiders is that they use the YextSpider class. Checking some other spiders that are also based on this class, reveals they've also stopped working. The spider stats suggest 404 and 403 errors are preventing the data being retrieved. If this is a general problem affecting multiple spiders, would someone be able to take a look and see if it can be fixed? </issue> <code> [start of locations/spiders/primanti_bros_us.py] 1 from locations.categories import Categories 2 from locations.storefinders.yext import YextSpider 3 4 5 class PrimantiBrosUSSpider(YextSpider): 6 name = "primanti_bros_us" 7 item_attributes = {"brand": "Primanti Bros", "brand_wikidata": "Q7243049", "extras": Categories.RESTAURANT.value} 8 api_key = "7515c25fc685bbdd7c5975b6573c6912" 9 api_version = "20220511" 10 11 def parse_item(self, item, location): 12 if "test-location" in item["ref"]: 13 return 14 item["ref"] = location.get("c_pagesURL") 15 item["name"] = location.get("c_searchName") 16 item["website"] = location.get("c_pagesURL") 17 item.pop("twitter", None) 18 yield item 19 [end of locations/spiders/primanti_bros_us.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/primanti_bros_us.py b/locations/spiders/primanti_bros_us.py --- a/locations/spiders/primanti_bros_us.py +++ b/locations/spiders/primanti_bros_us.py @@ -1,18 +1,13 @@ from locations.categories import Categories -from locations.storefinders.yext import YextSpider +from locations.storefinders.yext_answers import YextAnswersSpider -class PrimantiBrosUSSpider(YextSpider): +class PrimantiBrosUSSpider(YextAnswersSpider): name = "primanti_bros_us" item_attributes = {"brand": "Primanti Bros", "brand_wikidata": "Q7243049", "extras": Categories.RESTAURANT.value} api_key = "7515c25fc685bbdd7c5975b6573c6912" - api_version = "20220511" + experience_key = "locator" - def parse_item(self, item, location): - if "test-location" in item["ref"]: - return - item["ref"] = location.get("c_pagesURL") - item["name"] = location.get("c_searchName") - item["website"] = location.get("c_pagesURL") - item.pop("twitter", None) + def parse_item(self, location, item): + item["website"] = location["data"].get("landingPageUrl") yield item
{"golden_diff": "diff --git a/locations/spiders/primanti_bros_us.py b/locations/spiders/primanti_bros_us.py\n--- a/locations/spiders/primanti_bros_us.py\n+++ b/locations/spiders/primanti_bros_us.py\n@@ -1,18 +1,13 @@\n from locations.categories import Categories\n-from locations.storefinders.yext import YextSpider\n+from locations.storefinders.yext_answers import YextAnswersSpider\n \n \n-class PrimantiBrosUSSpider(YextSpider):\n+class PrimantiBrosUSSpider(YextAnswersSpider):\n name = \"primanti_bros_us\"\n item_attributes = {\"brand\": \"Primanti Bros\", \"brand_wikidata\": \"Q7243049\", \"extras\": Categories.RESTAURANT.value}\n api_key = \"7515c25fc685bbdd7c5975b6573c6912\"\n- api_version = \"20220511\"\n+ experience_key = \"locator\"\n \n- def parse_item(self, item, location):\n- if \"test-location\" in item[\"ref\"]:\n- return\n- item[\"ref\"] = location.get(\"c_pagesURL\")\n- item[\"name\"] = location.get(\"c_searchName\")\n- item[\"website\"] = location.get(\"c_pagesURL\")\n- item.pop(\"twitter\", None)\n+ def parse_item(self, location, item):\n+ item[\"website\"] = location[\"data\"].get(\"landingPageUrl\")\n yield item\n", "issue": "Problems with Spiders built on YextSpider\nI've noticed a few of the spiders whose results I was using in my OSM tool have recently stopped returning any results, including five_guys_de_es_fr_gb.py , matalan_gb.py , and three_gb.py .\r\n\r\nA common feature of these spiders is that they use the YextSpider class. Checking some other spiders that are also based on this class, reveals they've also stopped working. The spider stats suggest 404 and 403 errors are preventing the data being retrieved.\r\n\r\nIf this is a general problem affecting multiple spiders, would someone be able to take a look and see if it can be fixed?\n", "before_files": [{"content": "from locations.categories import Categories\nfrom locations.storefinders.yext import YextSpider\n\n\nclass PrimantiBrosUSSpider(YextSpider):\n name = \"primanti_bros_us\"\n item_attributes = {\"brand\": \"Primanti Bros\", \"brand_wikidata\": \"Q7243049\", \"extras\": Categories.RESTAURANT.value}\n api_key = \"7515c25fc685bbdd7c5975b6573c6912\"\n api_version = \"20220511\"\n\n def parse_item(self, item, location):\n if \"test-location\" in item[\"ref\"]:\n return\n item[\"ref\"] = location.get(\"c_pagesURL\")\n item[\"name\"] = location.get(\"c_searchName\")\n item[\"website\"] = location.get(\"c_pagesURL\")\n item.pop(\"twitter\", None)\n yield item\n", "path": "locations/spiders/primanti_bros_us.py"}]}
num_tokens_prompt: 922
num_tokens_diff: 348
problem_id: gh_patches_debug_466
source: rasdani/github-patches
task_type: git_diff
in_source_id: v7labs__darwin-py-315
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove unused requests_toolbelt requirement? AFAICT, requests_toolbelt is not used anywhere, yet it exists in the install_requires. https://github.com/v7labs/darwin-py/blob/878e9a0d639d6b70886bb53b00486908c3b6a14b/setup.py#L32 https://github.com/v7labs/darwin-py/search?q=requests_toolbelt </issue> <code> [start of setup.py] 1 import re 2 from pathlib import Path 3 4 import setuptools 5 6 with open("README.md", "rb") as f: 7 long_description = f.read().decode("utf-8") 8 9 with open(Path(__file__).parent / "darwin" / "__init__.py", "r") as f: 10 content = f.read() 11 # from https://www.py4u.net/discuss/139845 12 version = re.search(r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', content).group(1) 13 14 setuptools.setup( 15 name="darwin-py", 16 version=version, 17 author="V7", 18 author_email="[email protected]", 19 description="Library and command line interface for darwin.v7labs.com", 20 long_description=long_description, 21 long_description_content_type="text/markdown", 22 url="https://github.com/v7labs/darwin-py", 23 setup_requires=["wheel"], 24 install_requires=[ 25 "argcomplete", 26 "dataclasses", 27 "humanize", 28 "numpy", 29 "pillow", 30 "pyyaml>=5.1", 31 "requests", 32 "requests_toolbelt", 33 "rich", 34 "upolygon==0.1.6", 35 "jsonschema", 36 ], 37 extras_require={"test": ["responses", "pytest", "pytest-describe"], "ml": ["sklearn", "torch", "torchvision"]}, 38 packages=[ 39 "darwin", 40 "darwin.importer", 41 "darwin.dataset", 42 "darwin.torch", 43 "darwin.exporter", 44 "darwin.importer.formats", 45 "darwin.exporter.formats", 46 ], 47 entry_points={"console_scripts": ["darwin=darwin.cli:main"]}, 48 classifiers=["Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License"], 49 python_requires=">=3.6", 50 ) 51 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -29,7 +29,6 @@ "pillow", "pyyaml>=5.1", "requests", - "requests_toolbelt", "rich", "upolygon==0.1.6", "jsonschema",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,6 @@\n \"pillow\",\n \"pyyaml>=5.1\",\n \"requests\",\n- \"requests_toolbelt\",\n \"rich\",\n \"upolygon==0.1.6\",\n \"jsonschema\",\n", "issue": "Remove unused requests_toolbelt requirement?\nAFAICT, requests_toolbelt is not used anywhere, yet it exists in the install_requires.\r\n\r\nhttps://github.com/v7labs/darwin-py/blob/878e9a0d639d6b70886bb53b00486908c3b6a14b/setup.py#L32\r\n\r\nhttps://github.com/v7labs/darwin-py/search?q=requests_toolbelt\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nimport setuptools\n\nwith open(\"README.md\", \"rb\") as f:\n long_description = f.read().decode(\"utf-8\")\n\nwith open(Path(__file__).parent / \"darwin\" / \"__init__.py\", \"r\") as f:\n content = f.read()\n # from https://www.py4u.net/discuss/139845\n version = re.search(r'__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', content).group(1)\n\nsetuptools.setup(\n name=\"darwin-py\",\n version=version,\n author=\"V7\",\n author_email=\"[email protected]\",\n description=\"Library and command line interface for darwin.v7labs.com\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/v7labs/darwin-py\",\n setup_requires=[\"wheel\"],\n install_requires=[\n \"argcomplete\",\n \"dataclasses\",\n \"humanize\",\n \"numpy\",\n \"pillow\",\n \"pyyaml>=5.1\",\n \"requests\",\n \"requests_toolbelt\",\n \"rich\",\n \"upolygon==0.1.6\",\n \"jsonschema\",\n ],\n extras_require={\"test\": [\"responses\", \"pytest\", \"pytest-describe\"], \"ml\": [\"sklearn\", \"torch\", \"torchvision\"]},\n packages=[\n \"darwin\",\n \"darwin.importer\",\n \"darwin.dataset\",\n \"darwin.torch\",\n \"darwin.exporter\",\n \"darwin.importer.formats\",\n \"darwin.exporter.formats\",\n ],\n entry_points={\"console_scripts\": [\"darwin=darwin.cli:main\"]},\n classifiers=[\"Programming Language :: Python :: 3\", \"License :: OSI Approved :: MIT License\"],\n python_requires=\">=3.6\",\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,133
num_tokens_diff: 78
problem_id: gh_patches_debug_4655
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-289
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `Function` which get no input cannot select `volatile` In `Function.__call__`, it checks `volatile` of all input variables, and the same value is used. But, `Function` which get not argument such as `Parameter`, always selects `volatile=False`. it may causes conflict of volatility. https://github.com/pfnet/chainer/blob/master/chainer/function.py#L132 `Parameter` always returns `False` here. </issue> <code> [start of chainer/functions/parameter.py] 1 import numpy 2 3 from chainer import function 4 from chainer.utils import type_check 5 6 7 class Parameter(function.Function): 8 9 """Function that outputs its weight array. 10 11 This is a parameterized function that takes no input and returns a variable 12 holding a shallow copy of the parameter array. 13 14 Args: 15 array: Initial parameter array. 16 17 """ 18 parameter_names = 'W', 19 gradient_names = 'gW', 20 21 def __init__(self, array): 22 self.W = array 23 self.gW = numpy.empty_like(array) 24 25 def check_type_forward(self, in_types): 26 type_check.expect(in_types.size() == 0) 27 28 def forward(self, x): 29 return self.W, 30 31 def backward(self, x, gy): 32 self.gW += gy[0] 33 return () 34 [end of chainer/functions/parameter.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/parameter.py b/chainer/functions/parameter.py --- a/chainer/functions/parameter.py +++ b/chainer/functions/parameter.py @@ -22,6 +22,13 @@ self.W = array self.gW = numpy.empty_like(array) + def __call__(self, volatile=False): + ret = super(Parameter, self).__call__() + if volatile: + ret.unchain_backward() + ret.volatile = volatile + return ret + def check_type_forward(self, in_types): type_check.expect(in_types.size() == 0)
{"golden_diff": "diff --git a/chainer/functions/parameter.py b/chainer/functions/parameter.py\n--- a/chainer/functions/parameter.py\n+++ b/chainer/functions/parameter.py\n@@ -22,6 +22,13 @@\n self.W = array\n self.gW = numpy.empty_like(array)\n \n+ def __call__(self, volatile=False):\n+ ret = super(Parameter, self).__call__()\n+ if volatile:\n+ ret.unchain_backward()\n+ ret.volatile = volatile\n+ return ret\n+\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 0)\n", "issue": "`Function` which get no input cannot select `volatile`\nIn `Function.__call__`, it checks `volatile` of all input variables, and the same value is used. But, `Function` which get not argument such as `Parameter`, always selects `volatile=False`. it may causes conflict of volatility.\n\nhttps://github.com/pfnet/chainer/blob/master/chainer/function.py#L132\n`Parameter` always returns `False` here.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Parameter(function.Function):\n\n \"\"\"Function that outputs its weight array.\n\n This is a parameterized function that takes no input and returns a variable\n holding a shallow copy of the parameter array.\n\n Args:\n array: Initial parameter array.\n\n \"\"\"\n parameter_names = 'W',\n gradient_names = 'gW',\n\n def __init__(self, array):\n self.W = array\n self.gW = numpy.empty_like(array)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 0)\n\n def forward(self, x):\n return self.W,\n\n def backward(self, x, gy):\n self.gW += gy[0]\n return ()\n", "path": "chainer/functions/parameter.py"}]}
num_tokens_prompt: 866
num_tokens_diff: 138
problem_id: gh_patches_debug_4060
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-poetry__poetry-6824
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exit code relay fails when run command is a script <!-- Hi there! Thank you for submitting a bug report! Before you submit your issue, please review and follow the instructions at https://python-poetry.org/docs/contributing/#reporting-bugs. Fully fill out the template below; insufficient information or bad reproduction instructions will impair the ability of others to help you. --> <!-- All the below information must be provided for others to understand and help with your issue. --> - **Poetry version**: `1.2.2` - **Python version**: `3.10.6` - **OS version and name**: macOS 12.5.1 - **pyproject.toml**: Relevant details below ``` [tool.poetry.scripts] toolname = "toolname.__main__:main" ``` <!-- All the below steps should be completed before submitting your issue. Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) stable Poetry version, installed using a recommended method. - [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate. - [x] I have consulted the [FAQ](https://python-poetry.org/docs/faq/) and [blog](https://python-poetry.org/blog/) for any relevant entries or release notes. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option) and have included the output below. ## Issue <!-- Now feel free to write your issue, and please be as descriptive as possible! Make sure to include detailed reproduction steps. --> <!-- Thanks again 🙌 ❤ --> Hi there, When using `poetry run` to run a script defined in the [`scripts`](https://python-poetry.org/docs/pyproject/#scripts) section of `pyproject.toml` the exit code of the script is not relayed as the final exit code of `poetry` itself. I originally noticed https://github.com/python-poetry/poetry/issues/5773 and https://github.com/python-poetry/poetry/issues/2369 and was wondering why it wasn't working for me. Consider the following cases: ``` $ poetry run python -m toolname --version 0.5.0 $ echo $? 1 ``` ``` $ poetry run toolname --version 0.5.0 $ echo $? 0 ``` It looks like https://github.com/python-poetry/poetry/pull/4456 did not fix https://github.com/python-poetry/poetry/issues/2369 because these are different execution paths. Looking at [`commands/run.py`](https://github.com/python-poetry/poetry/blob/16046d9ac9b72a49e1bc4618fb686695cc64821c/src/poetry/console/commands/run.py#L61), running a script (as defined in `pyproject.toml`) is a different execution path than running non-scripts. As mentioned in https://github.com/python-poetry/poetry/issues/2369, I believe the fix would be: `f"import_module('{module}').{callable_}()"` -> `f"sys.exit(import_module('{module}').{callable_}())"` I made this change locally and it fixed this issue for me. Alternatively, I believe another fix would be removing the `RunCommand.run_script` special-case code. Since `poetry` knows where the script is (`$VIRTUALENV/bin/`), it could include this directory in the `$PATH` when executing the command. `poetry` would then execute the script generated by [`builders/editable.py`](https://github.com/python-poetry/poetry/blob/master/src/poetry/masonry/builders/editable.py#L31). 
There may be reasons these two code paths are distinct that I don't understand, but if they can be unified to a single path that would ease maintenance burden and help alleviate situations like this where one path is updated but the other is forgotten. </issue> <code> [start of src/poetry/console/commands/run.py] 1 from __future__ import annotations 2 3 from typing import TYPE_CHECKING 4 5 from cleo.helpers import argument 6 7 from poetry.console.commands.env_command import EnvCommand 8 9 10 if TYPE_CHECKING: 11 from poetry.core.masonry.utils.module import Module 12 13 14 class RunCommand(EnvCommand): 15 name = "run" 16 description = "Runs a command in the appropriate environment." 17 18 arguments = [ 19 argument("args", "The command and arguments/options to run.", multiple=True) 20 ] 21 22 def handle(self) -> int: 23 args = self.argument("args") 24 script = args[0] 25 scripts = self.poetry.local_config.get("scripts") 26 27 if scripts and script in scripts: 28 return self.run_script(scripts[script], args) 29 30 try: 31 return self.env.execute(*args) 32 except FileNotFoundError: 33 self.line_error(f"<error>Command not found: <c1>{script}</c1></error>") 34 return 1 35 36 @property 37 def _module(self) -> Module: 38 from poetry.core.masonry.utils.module import Module 39 40 poetry = self.poetry 41 package = poetry.package 42 path = poetry.file.parent 43 module = Module(package.name, path.as_posix(), package.packages) 44 45 return module 46 47 def run_script(self, script: str | dict[str, str], args: str) -> int: 48 if isinstance(script, dict): 49 script = script["callable"] 50 51 module, callable_ = script.split(":") 52 53 src_in_sys_path = "sys.path.append('src'); " if self._module.is_in_src() else "" 54 55 cmd = ["python", "-c"] 56 57 cmd += [ 58 "import sys; " 59 "from importlib import import_module; " 60 f"sys.argv = {args!r}; {src_in_sys_path}" 61 f"import_module('{module}').{callable_}()" 62 ] 63 64 return self.env.execute(*cmd) 65 [end of src/poetry/console/commands/run.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/poetry/console/commands/run.py b/src/poetry/console/commands/run.py --- a/src/poetry/console/commands/run.py +++ b/src/poetry/console/commands/run.py @@ -58,7 +58,7 @@ "import sys; " "from importlib import import_module; " f"sys.argv = {args!r}; {src_in_sys_path}" - f"import_module('{module}').{callable_}()" + f"sys.exit(import_module('{module}').{callable_}())" ] return self.env.execute(*cmd)
{"golden_diff": "diff --git a/src/poetry/console/commands/run.py b/src/poetry/console/commands/run.py\n--- a/src/poetry/console/commands/run.py\n+++ b/src/poetry/console/commands/run.py\n@@ -58,7 +58,7 @@\n \"import sys; \"\n \"from importlib import import_module; \"\n f\"sys.argv = {args!r}; {src_in_sys_path}\"\n- f\"import_module('{module}').{callable_}()\"\n+ f\"sys.exit(import_module('{module}').{callable_}())\"\n ]\n \n return self.env.execute(*cmd)\n", "issue": "Exit code relay fails when run command is a script\n<!--\r\n Hi there! Thank you for submitting a bug report!\r\n\r\n Before you submit your issue, please review and follow the instructions at\r\n https://python-poetry.org/docs/contributing/#reporting-bugs. Fully fill out the template below; insufficient\r\n information or bad reproduction instructions will impair the ability of others to help you.\r\n-->\r\n\r\n<!-- All the below information must be provided for others to understand and help with your issue. -->\r\n- **Poetry version**: `1.2.2`\r\n- **Python version**: `3.10.6`\r\n- **OS version and name**: macOS 12.5.1\r\n- **pyproject.toml**: Relevant details below\r\n\r\n```\r\n[tool.poetry.scripts]\r\ntoolname = \"toolname.__main__:main\"\r\n```\r\n\r\n<!-- All the below steps should be completed before submitting your issue. Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) stable Poetry version, installed using a recommended method.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] I have consulted the [FAQ](https://python-poetry.org/docs/faq/) and [blog](https://python-poetry.org/blog/) for any relevant entries or release notes.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option) and have included the output below.\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, and please be as descriptive as possible! Make sure to include detailed reproduction steps. -->\r\n<!-- Thanks again \ud83d\ude4c \u2764 -->\r\n\r\nHi there,\r\n\r\nWhen using `poetry run` to run a script defined in the [`scripts`](https://python-poetry.org/docs/pyproject/#scripts) section of `pyproject.toml` the exit code of the script is not relayed as the final exit code of `poetry` itself. I originally noticed https://github.com/python-poetry/poetry/issues/5773 and https://github.com/python-poetry/poetry/issues/2369 and was wondering why it wasn't working for me.\r\n\r\nConsider the following cases:\r\n\r\n```\r\n$ poetry run python -m toolname --version\r\n0.5.0\r\n$ echo $?\r\n1\r\n```\r\n\r\n```\r\n$ poetry run toolname --version\r\n0.5.0\r\n$ echo $?\r\n0\r\n```\r\n\r\nIt looks like https://github.com/python-poetry/poetry/pull/4456 did not fix https://github.com/python-poetry/poetry/issues/2369 because these are different execution paths. Looking at [`commands/run.py`](https://github.com/python-poetry/poetry/blob/16046d9ac9b72a49e1bc4618fb686695cc64821c/src/poetry/console/commands/run.py#L61), running a script (as defined in `pyproject.toml`) is a different execution path than running non-scripts. 
As mentioned in https://github.com/python-poetry/poetry/issues/2369, I believe the fix would be:\r\n\r\n`f\"import_module('{module}').{callable_}()\"` -> `f\"sys.exit(import_module('{module}').{callable_}())\"`\r\n\r\nI made this change locally and it fixed this issue for me.\r\n\r\nAlternatively, I believe another fix would be removing the `RunCommand.run_script` special-case code. Since `poetry` knows where the script is (`$VIRTUALENV/bin/`), it could include this directory in the `$PATH` when executing the command. `poetry` would then execute the script generated by [`builders/editable.py`](https://github.com/python-poetry/poetry/blob/master/src/poetry/masonry/builders/editable.py#L31).\r\n\r\nThere may be reasons these two code paths are distinct that I don't understand, but if they can be unified to a single path that would ease maintenance burden and help alleviate situations like this where one path is updated but the other is forgotten.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom cleo.helpers import argument\n\nfrom poetry.console.commands.env_command import EnvCommand\n\n\nif TYPE_CHECKING:\n from poetry.core.masonry.utils.module import Module\n\n\nclass RunCommand(EnvCommand):\n name = \"run\"\n description = \"Runs a command in the appropriate environment.\"\n\n arguments = [\n argument(\"args\", \"The command and arguments/options to run.\", multiple=True)\n ]\n\n def handle(self) -> int:\n args = self.argument(\"args\")\n script = args[0]\n scripts = self.poetry.local_config.get(\"scripts\")\n\n if scripts and script in scripts:\n return self.run_script(scripts[script], args)\n\n try:\n return self.env.execute(*args)\n except FileNotFoundError:\n self.line_error(f\"<error>Command not found: <c1>{script}</c1></error>\")\n return 1\n\n @property\n def _module(self) -> Module:\n from poetry.core.masonry.utils.module import Module\n\n poetry = self.poetry\n package = poetry.package\n path = poetry.file.parent\n module = Module(package.name, path.as_posix(), package.packages)\n\n return module\n\n def run_script(self, script: str | dict[str, str], args: str) -> int:\n if isinstance(script, dict):\n script = script[\"callable\"]\n\n module, callable_ = script.split(\":\")\n\n src_in_sys_path = \"sys.path.append('src'); \" if self._module.is_in_src() else \"\"\n\n cmd = [\"python\", \"-c\"]\n\n cmd += [\n \"import sys; \"\n \"from importlib import import_module; \"\n f\"sys.argv = {args!r}; {src_in_sys_path}\"\n f\"import_module('{module}').{callable_}()\"\n ]\n\n return self.env.execute(*cmd)\n", "path": "src/poetry/console/commands/run.py"}]}
num_tokens_prompt: 1,993
num_tokens_diff: 138
problem_id: gh_patches_debug_20245
source: rasdani/github-patches
task_type: git_diff
in_source_id: translate__translate-3405
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Backwards incompatible change in multistring The behavior of the `multistring` class with respect to string comparisons has changed in 1.14-rc1, more specifically when the `autoencode` class was removed in 4f9ee7edfc9. At least this is the case in Python 2.7. I have experienced this in some custom Pootle code when testing out TTK from master — a multistring with non-ASCII characters was being compared against the empty byte string `''`, and it resulted in a encoding error. I can reproduce this in a test case: ``` python from translate.misc import multistring def test_cmp(): t = multistring.multistring s1 = t("test") assert s1.__cmp__("test") == 0 s2 = t(u"téßt") assert s2.__cmp__("test") > 0 ``` (Note that for 1.13.x the multistring constructor needs an `encoding='UTF-8'` parameter in the test). I'm not sure if this could be considered a bug, after all my understanding is apps/libraries should internally work with code points and not bytes, however this is a breaking change that might potentially affect existing consumer code so I consider it's worth bringing this up to hear people's thoughts. </issue> <code> [start of translate/misc/multistring.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2006 Zuza Software Foundation 4 # 5 # This file is part of translate. 6 # 7 # translate is free software; you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation; either version 2 of the License, or 10 # (at your option) any later version. 11 # 12 # translate is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with this program; if not, see <http://www.gnu.org/licenses/>. 
19 20 """Supports a hybrid Unicode string that can also have a list of alternate 21 strings in the strings attribute""" 22 23 import six 24 25 26 class multistring(six.text_type): 27 28 def __new__(newtype, string=u"", *args, **kwargs): 29 if isinstance(string, list): 30 if not string: 31 raise ValueError("multistring must contain at least one string") 32 mainstring = string[0] 33 newstring = multistring.__new__(newtype, string[0]) 34 newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]] 35 else: 36 newstring = six.text_type.__new__(newtype, string) 37 newstring.strings = [newstring] 38 return newstring 39 40 def __init__(self, *args, **kwargs): 41 super(multistring, self).__init__() 42 if not hasattr(self, "strings"): 43 self.strings = [] 44 45 def __cmp__(self, otherstring): 46 def cmp_compat(s1, s2): 47 # Python 3 compatible cmp() equivalent 48 return (s1 > s2) - (s1 < s2) 49 if isinstance(otherstring, multistring): 50 parentcompare = cmp_compat(six.text_type(self), otherstring) 51 if parentcompare: 52 return parentcompare 53 else: 54 return cmp_compat(self.strings[1:], otherstring.strings[1:]) 55 elif isinstance(otherstring, six.text_type): 56 return cmp_compat(six.text_type(self), otherstring) 57 elif isinstance(otherstring, bytes): 58 return cmp_compat(bytes(self), otherstring) 59 elif isinstance(otherstring, list) and otherstring: 60 return cmp_compat(self, multistring(otherstring)) 61 else: 62 return cmp_compat(str(type(self)), str(type(otherstring))) 63 64 def __hash__(self): 65 return hash(''.join(self.strings)) 66 67 def __ne__(self, otherstring): 68 return self.__cmp__(otherstring) != 0 69 70 def __eq__(self, otherstring): 71 return self.__cmp__(otherstring) == 0 72 73 def __repr__(self): 74 return "multistring([" + ",".join(self.strings) + "])" 75 76 def replace(self, old, new, count=None): 77 if count is None: 78 newstr = multistring(super(multistring, self).replace(old, new)) 79 else: 80 newstr = multistring(super(multistring, self).replace(old, new, count)) 81 for s in self.strings[1:]: 82 if count is None: 83 newstr.strings.append(s.replace(old, new)) 84 else: 85 newstr.strings.append(s.replace(old, new, count)) 86 return newstr 87 [end of translate/misc/multistring.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py --- a/translate/misc/multistring.py +++ b/translate/misc/multistring.py @@ -55,7 +55,7 @@ elif isinstance(otherstring, six.text_type): return cmp_compat(six.text_type(self), otherstring) elif isinstance(otherstring, bytes): - return cmp_compat(bytes(self), otherstring) + return cmp_compat(self.encode('utf-8'), otherstring) elif isinstance(otherstring, list) and otherstring: return cmp_compat(self, multistring(otherstring)) else: @@ -71,7 +71,8 @@ return self.__cmp__(otherstring) == 0 def __repr__(self): - return "multistring([" + ",".join(self.strings) + "])" + _repr = "multistring([" + u",".join(self.strings) + "])" + return _repr.encode('utf-8') if six.PY2 else _repr def replace(self, old, new, count=None): if count is None:
{"golden_diff": "diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py\n--- a/translate/misc/multistring.py\n+++ b/translate/misc/multistring.py\n@@ -55,7 +55,7 @@\n elif isinstance(otherstring, six.text_type):\n return cmp_compat(six.text_type(self), otherstring)\n elif isinstance(otherstring, bytes):\n- return cmp_compat(bytes(self), otherstring)\n+ return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n@@ -71,7 +71,8 @@\n return self.__cmp__(otherstring) == 0\n \n def __repr__(self):\n- return \"multistring([\" + \",\".join(self.strings) + \"])\"\n+ _repr = \"multistring([\" + u\",\".join(self.strings) + \"])\"\n+ return _repr.encode('utf-8') if six.PY2 else _repr\n \n def replace(self, old, new, count=None):\n if count is None:\n", "issue": "Backwards incompatible change in multistring\nThe behavior of the `multistring` class with respect to string comparisons has changed in 1.14-rc1, more specifically when the `autoencode` class was removed in 4f9ee7edfc9. At least this is the case in Python 2.7.\n\nI have experienced this in some custom Pootle code when testing out TTK from master \u2014 a multistring with non-ASCII characters was being compared against the empty byte string `''`, and it resulted in a encoding error. I can reproduce this in a test case:\n\n``` python\nfrom translate.misc import multistring\n\ndef test_cmp():\n t = multistring.multistring\n s1 = t(\"test\")\n assert s1.__cmp__(\"test\") == 0\n\n s2 = t(u\"t\u00e9\u00dft\")\n assert s2.__cmp__(\"test\") > 0\n```\n\n(Note that for 1.13.x the multistring constructor needs an `encoding='UTF-8'` parameter in the test).\n\nI'm not sure if this could be considered a bug, after all my understanding is apps/libraries should internally work with code points and not bytes, however this is a breaking change that might potentially affect existing consumer code so I consider it's worth bringing this up to hear people's thoughts.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\"\"\"\n\nimport six\n\n\nclass multistring(six.text_type):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n mainstring = string[0]\n newstring = multistring.__new__(newtype, string[0])\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = six.text_type.__new__(newtype, string)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super(multistring, self).__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(six.text_type(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, six.text_type):\n return cmp_compat(six.text_type(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(bytes(self), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(''.join(self.strings))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n return \"multistring([\" + \",\".join(self.strings) + \"])\"\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n else:\n newstr = multistring(super(multistring, self).replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}]}
num_tokens_prompt: 1,732
num_tokens_diff: 245
problem_id: gh_patches_debug_15464
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-telemetry__opentelemetry-python-contrib-1870
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Elasticsearch sanitization does not work for bulk queries **Describe your environment** Discovered in elasticsearch 5.5.3 and elasticsearch-dsl 5.4.0 and caused by moving to the default sanitization in #1758. The issue is illustrated here where `body` comes in as a string, not as a dictionary: <img width="1355" alt="image" src="https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/0f49ce7d-9558-4148-a2fd-28dc4bd70983"> This is caseud by the bulk flow specifically as the body gets translated to a string here: <img width="1174" alt="image" src="https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/2106e7aa-2bde-4579-b562-a8dc23007a1c"> which looks like this: <img width="1144" alt="image" src="https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/ae72d60a-5047-4559-ad05-6fc1ddb2658d"> **Steps to reproduce** I don't have a super straightforward way to reproduce other than to use the bulk API from elasticsearch. **What is the expected behavior?** What did you expect to see? **What is the actual behavior?** The below stacktrace: ``` File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/helpers/__init__.py", line 95, in _process_bulk_chunk resp = client.bulk('\n'.join(bulk_actions) + '\n', **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/client/utils.py", line 73, in _wrapped return func(*args, params=params, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/client/__init__.py", line 1173, in bulk return self.transport.perform_request('POST', _make_path(index, ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/__init__.py", line 224, in wrapper attributes[SpanAttributes.DB_STATEMENT] = sanitize_body( ^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/utils.py", line 54, in sanitize_body flatten_body = _flatten_dict(body) ^^^^^^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/utils.py", line 30, in _flatten_dict for k, v in d.items(): ^^^^^^^ AttributeError: 'str' object has no attribute 'items' ``` **Additional context** Add any other context about the problem here. </issue> <code> [start of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 sanitized_keys = ( 16 "message", 17 "should", 18 "filter", 19 "query", 20 "queries", 21 "intervals", 22 "match", 23 ) 24 sanitized_value = "?" 25 26 27 # pylint: disable=C0103 28 def _flatten_dict(d, parent_key=""): 29 items = [] 30 for k, v in d.items(): 31 new_key = parent_key + "." + k if parent_key else k 32 # recursive call _flatten_dict for a non-empty dict value 33 if isinstance(v, dict) and v: 34 items.extend(_flatten_dict(v, new_key).items()) 35 else: 36 items.append((new_key, v)) 37 return dict(items) 38 39 40 def _unflatten_dict(d): 41 res = {} 42 for k, v in d.items(): 43 keys = k.split(".") 44 d = res 45 for key in keys[:-1]: 46 if key not in d: 47 d[key] = {} 48 d = d[key] 49 d[keys[-1]] = v 50 return res 51 52 53 def sanitize_body(body) -> str: 54 flatten_body = _flatten_dict(body) 55 56 for key in flatten_body: 57 if key.endswith(sanitized_keys): 58 flatten_body[key] = sanitized_value 59 60 return str(_unflatten_dict(flatten_body)) 61 [end of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import json sanitized_keys = ( "message", @@ -51,6 +52,9 @@ def sanitize_body(body) -> str: + if isinstance(body, str): + body = json.loads(body) + flatten_body = _flatten_dict(body) for key in flatten_body:
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+import json\n \n sanitized_keys = (\n \"message\",\n@@ -51,6 +52,9 @@\n \n \n def sanitize_body(body) -> str:\n+ if isinstance(body, str):\n+ body = json.loads(body)\n+\n flatten_body = _flatten_dict(body)\n \n for key in flatten_body:\n", "issue": "Elasticsearch sanitization does not work for bulk queries\n**Describe your environment**\r\n\r\nDiscovered in elasticsearch 5.5.3 and elasticsearch-dsl 5.4.0 and caused by moving to the default sanitization in #1758.\r\n\r\nThe issue is illustrated here where `body` comes in as a string, not as a dictionary:\r\n\r\n<img width=\"1355\" alt=\"image\" src=\"https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/0f49ce7d-9558-4148-a2fd-28dc4bd70983\">\r\n\r\nThis is caseud by the bulk flow specifically as the body gets translated to a string here:\r\n\r\n<img width=\"1174\" alt=\"image\" src=\"https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/2106e7aa-2bde-4579-b562-a8dc23007a1c\">\r\n\r\nwhich looks like this:\r\n\r\n<img width=\"1144\" alt=\"image\" src=\"https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/ae72d60a-5047-4559-ad05-6fc1ddb2658d\">\r\n\r\n\r\n**Steps to reproduce**\r\n\r\nI don't have a super straightforward way to reproduce other than to use the bulk API from elasticsearch.\r\n\r\n\r\n**What is the expected behavior?**\r\nWhat did you expect to see?\r\n\r\n**What is the actual behavior?**\r\n\r\nThe below stacktrace:\r\n\r\n```\r\n File \"/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/helpers/__init__.py\", line 95, in _process_bulk_chunk\r\n resp = client.bulk('\\n'.join(bulk_actions) + '\\n', **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/client/utils.py\", line 73, in _wrapped\r\n return func(*args, params=params, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/client/__init__.py\", line 1173, in bulk\r\n return self.transport.perform_request('POST', _make_path(index,\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/__init__.py\", line 224, in wrapper\r\n attributes[SpanAttributes.DB_STATEMENT] = sanitize_body(\r\n ^^^^^^^^^^^^^^\r\n File \"/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/utils.py\", line 54, in sanitize_body\r\n flatten_body = _flatten_dict(body)\r\n ^^^^^^^^^^^^^^^^^^^\r\n File 
\"/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/utils.py\", line 30, in _flatten_dict\r\n for k, v in d.items():\r\n ^^^^^^^\r\nAttributeError: 'str' object has no attribute 'items'\r\n\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsanitized_keys = (\n \"message\",\n \"should\",\n \"filter\",\n \"query\",\n \"queries\",\n \"intervals\",\n \"match\",\n)\nsanitized_value = \"?\"\n\n\n# pylint: disable=C0103\ndef _flatten_dict(d, parent_key=\"\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + \".\" + k if parent_key else k\n # recursive call _flatten_dict for a non-empty dict value\n if isinstance(v, dict) and v:\n items.extend(_flatten_dict(v, new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef _unflatten_dict(d):\n res = {}\n for k, v in d.items():\n keys = k.split(\".\")\n d = res\n for key in keys[:-1]:\n if key not in d:\n d[key] = {}\n d = d[key]\n d[keys[-1]] = v\n return res\n\n\ndef sanitize_body(body) -> str:\n flatten_body = _flatten_dict(body)\n\n for key in flatten_body:\n if key.endswith(sanitized_keys):\n flatten_body[key] = sanitized_value\n\n return str(_unflatten_dict(flatten_body))\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py"}]}
1,901
212
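The failure in this record comes from the bulk helper handing the instrumentation a serialized string where the sanitizer expected a dict; the golden_diff above adds an isinstance guard that parses string bodies before flattening. A trimmed-down sketch of that guard (the key list is copied from the record's utils module, but the recursive flattening step is omitted here for brevity, so only top-level keys are masked):

```python
import json

SANITIZED_KEYS = ("message", "should", "filter", "query", "queries", "intervals", "match")


def sanitize_body(body):
    # Bulk helpers serialize the payload before sending it, so a plain string
    # has to be parsed back into a structure before its keys can be inspected
    # (this mirrors the isinstance guard in the patch).
    if isinstance(body, str):
        body = json.loads(body)
    # Simplified: the real helper flattens nested dicts first.
    return {k: ("?" if k.endswith(SANITIZED_KEYS) else v) for k, v in body.items()}


print(sanitize_body('{"query": {"match_all": {}}, "size": 3}'))
# -> {'query': '?', 'size': 3}
```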
gh_patches_debug_37982
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1244
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [PORT] Implement hash for App Insights session ID > Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/3317 Addresses this issue reported in JS https://github.com/microsoft/botbuilder-js/issues/1512 App Insights has a maximum session ID of 64 characters, but in some instances for some channels (such as reported with Teams) this may be exceeded due to conversation ID currently being used for session ID. This PR hashes the conversation ID and sets this as the session ID. It also adds an additional telemetry property to ensure we retain the original conversation ID within the telemetry. The hashed ID is only used for Application Insights and the original conversation ID and activity are left untouched. # Changed projects * integration </issue> <code> [start of libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 import json 4 from abc import ABC, abstractmethod 5 6 7 class TelemetryProcessor(ABC): 8 """Application Insights Telemetry Processor base class for Bot""" 9 10 @property 11 def activity_json(self) -> json: 12 """Retrieve the request body as json (Activity).""" 13 body_text = self.get_request_body() 14 body = json.loads(body_text) if body_text is not None else None 15 return body 16 17 @abstractmethod 18 def can_process(self) -> bool: 19 """Whether the processor can process the request body. 20 :return: True if the request body can be processed, False otherwise. 21 :rtype: bool 22 """ 23 return False 24 25 @abstractmethod 26 def get_request_body(self) -> str: # pylint: disable=inconsistent-return-statements 27 """Retrieve the request body from flask/django middleware component.""" 28 raise NotImplementedError() 29 30 def __call__(self, data, context) -> bool: 31 """ Traditional Web user and session ID's don't apply for Bots. This processor 32 replaces the identifiers to be consistent with Bot Framework's notion of 33 user and session id's. 34 35 Each event that gets logged (with this processor added) will contain additional 36 properties. 37 38 The following properties are replaced: 39 - context.user.id - The user ID that Application Insights uses to identify 40 a unique user. 41 - context.session.id - The session ID that APplication Insights uses to 42 identify a unique session. 43 44 In addition, the additional data properties are added: 45 - activityId - The Bot Framework's Activity ID which represents a unique 46 message identifier. 47 - channelId - The Bot Framework "Channel" (ie, slack/facebook/directline/etc) 48 - activityType - The Bot Framework message classification (ie, message) 49 50 :param data: Data from Application Insights 51 :type data: telemetry item 52 :param context: Context from Application Insights 53 :type context: context object 54 :returns: bool -- determines if the event is passed to the server (False = Filtered). 55 """ 56 57 post_data = self.activity_json 58 if post_data is None: 59 # If there is no body (not a BOT request or not configured correctly). 60 # We *could* filter here, but we're allowing event to go through. 
61 return True 62 63 # Override session and user id 64 from_prop = post_data["from"] if "from" in post_data else None 65 user_id = from_prop["id"] if from_prop is not None else None 66 channel_id = post_data["channelId"] if "channelId" in post_data else None 67 conversation = ( 68 post_data["conversation"] if "conversation" in post_data else None 69 ) 70 conversation_id = conversation["id"] if "id" in conversation else None 71 context.user.id = channel_id + user_id 72 context.session.id = conversation_id 73 74 # Additional bot-specific properties 75 if "id" in post_data: 76 data.properties["activityId"] = post_data["id"] 77 if "channelId" in post_data: 78 data.properties["channelId"] = post_data["channelId"] 79 if "type" in post_data: 80 data.properties["activityType"] = post_data["type"] 81 return True 82 [end of libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py b/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py --- a/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py +++ b/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py @@ -1,7 +1,9 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +import base64 import json from abc import ABC, abstractmethod +from _sha256 import sha256 class TelemetryProcessor(ABC): @@ -11,8 +13,9 @@ def activity_json(self) -> json: """Retrieve the request body as json (Activity).""" body_text = self.get_request_body() - body = json.loads(body_text) if body_text is not None else None - return body + if body_text: + return body_text if isinstance(body_text, dict) else json.loads(body_text) + return None @abstractmethod def can_process(self) -> bool: @@ -67,15 +70,34 @@ conversation = ( post_data["conversation"] if "conversation" in post_data else None ) - conversation_id = conversation["id"] if "id" in conversation else None + + session_id = "" + if "id" in conversation: + conversation_id = conversation["id"] + session_id = base64.b64encode( + sha256(conversation_id.encode("utf-8")).digest() + ).decode() + + # Set the user id on the Application Insights telemetry item. context.user.id = channel_id + user_id - context.session.id = conversation_id - # Additional bot-specific properties + # Set the session id on the Application Insights telemetry item. + # Hashed ID is used due to max session ID length for App Insights session Id + context.session.id = session_id + + # Set the activity id: + # https://github.com/Microsoft/botframework-obi/blob/master/botframework-activity/botframework-activity.md#id if "id" in post_data: data.properties["activityId"] = post_data["id"] + + # Set the channel id: + # https://github.com/Microsoft/botframework-obi/blob/master/botframework-activity/botframework-activity.md#channel-id if "channelId" in post_data: data.properties["channelId"] = post_data["channelId"] + + # Set the activity type: + # https://github.com/Microsoft/botframework-obi/blob/master/botframework-activity/botframework-activity.md#type if "type" in post_data: data.properties["activityType"] = post_data["type"] + return True
{"golden_diff": "diff --git a/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py b/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py\n--- a/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py\n+++ b/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py\n@@ -1,7 +1,9 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n+import base64\n import json\n from abc import ABC, abstractmethod\n+from _sha256 import sha256\n \n \n class TelemetryProcessor(ABC):\n@@ -11,8 +13,9 @@\n def activity_json(self) -> json:\n \"\"\"Retrieve the request body as json (Activity).\"\"\"\n body_text = self.get_request_body()\n- body = json.loads(body_text) if body_text is not None else None\n- return body\n+ if body_text:\n+ return body_text if isinstance(body_text, dict) else json.loads(body_text)\n+ return None\n \n @abstractmethod\n def can_process(self) -> bool:\n@@ -67,15 +70,34 @@\n conversation = (\n post_data[\"conversation\"] if \"conversation\" in post_data else None\n )\n- conversation_id = conversation[\"id\"] if \"id\" in conversation else None\n+\n+ session_id = \"\"\n+ if \"id\" in conversation:\n+ conversation_id = conversation[\"id\"]\n+ session_id = base64.b64encode(\n+ sha256(conversation_id.encode(\"utf-8\")).digest()\n+ ).decode()\n+\n+ # Set the user id on the Application Insights telemetry item.\n context.user.id = channel_id + user_id\n- context.session.id = conversation_id\n \n- # Additional bot-specific properties\n+ # Set the session id on the Application Insights telemetry item.\n+ # Hashed ID is used due to max session ID length for App Insights session Id\n+ context.session.id = session_id\n+\n+ # Set the activity id:\n+ # https://github.com/Microsoft/botframework-obi/blob/master/botframework-activity/botframework-activity.md#id\n if \"id\" in post_data:\n data.properties[\"activityId\"] = post_data[\"id\"]\n+\n+ # Set the channel id:\n+ # https://github.com/Microsoft/botframework-obi/blob/master/botframework-activity/botframework-activity.md#channel-id\n if \"channelId\" in post_data:\n data.properties[\"channelId\"] = post_data[\"channelId\"]\n+\n+ # Set the activity type:\n+ # https://github.com/Microsoft/botframework-obi/blob/master/botframework-activity/botframework-activity.md#type\n if \"type\" in post_data:\n data.properties[\"activityType\"] = post_data[\"type\"]\n+\n return True\n", "issue": "[PORT] Implement hash for App Insights session ID\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3317\n\nAddresses this issue reported in JS https://github.com/microsoft/botbuilder-js/issues/1512\r\n\r\nApp Insights has a maximum session ID of 64 characters, but in some instances for some channels (such as reported with Teams) this may be exceeded due to conversation ID currently being used for session ID. This PR hashes the conversation ID and sets this as the session ID. It also adds an additional telemetry property to ensure we retain the original conversation ID within the telemetry.\r\n\r\nThe hashed ID is only used for Application Insights and the original conversation ID and activity are left untouched.\n\n\r\n# Changed projects\r\n* integration\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nimport json\nfrom abc import ABC, abstractmethod\n\n\nclass TelemetryProcessor(ABC):\n \"\"\"Application Insights Telemetry Processor base class for Bot\"\"\"\n\n @property\n def activity_json(self) -> json:\n \"\"\"Retrieve the request body as json (Activity).\"\"\"\n body_text = self.get_request_body()\n body = json.loads(body_text) if body_text is not None else None\n return body\n\n @abstractmethod\n def can_process(self) -> bool:\n \"\"\"Whether the processor can process the request body.\n :return: True if the request body can be processed, False otherwise.\n :rtype: bool\n \"\"\"\n return False\n\n @abstractmethod\n def get_request_body(self) -> str: # pylint: disable=inconsistent-return-statements\n \"\"\"Retrieve the request body from flask/django middleware component.\"\"\"\n raise NotImplementedError()\n\n def __call__(self, data, context) -> bool:\n \"\"\" Traditional Web user and session ID's don't apply for Bots. This processor\n replaces the identifiers to be consistent with Bot Framework's notion of\n user and session id's.\n\n Each event that gets logged (with this processor added) will contain additional\n properties.\n\n The following properties are replaced:\n - context.user.id - The user ID that Application Insights uses to identify\n a unique user.\n - context.session.id - The session ID that APplication Insights uses to\n identify a unique session.\n\n In addition, the additional data properties are added:\n - activityId - The Bot Framework's Activity ID which represents a unique\n message identifier.\n - channelId - The Bot Framework \"Channel\" (ie, slack/facebook/directline/etc)\n - activityType - The Bot Framework message classification (ie, message)\n\n :param data: Data from Application Insights\n :type data: telemetry item\n :param context: Context from Application Insights\n :type context: context object\n :returns: bool -- determines if the event is passed to the server (False = Filtered).\n \"\"\"\n\n post_data = self.activity_json\n if post_data is None:\n # If there is no body (not a BOT request or not configured correctly).\n # We *could* filter here, but we're allowing event to go through.\n return True\n\n # Override session and user id\n from_prop = post_data[\"from\"] if \"from\" in post_data else None\n user_id = from_prop[\"id\"] if from_prop is not None else None\n channel_id = post_data[\"channelId\"] if \"channelId\" in post_data else None\n conversation = (\n post_data[\"conversation\"] if \"conversation\" in post_data else None\n )\n conversation_id = conversation[\"id\"] if \"id\" in conversation else None\n context.user.id = channel_id + user_id\n context.session.id = conversation_id\n\n # Additional bot-specific properties\n if \"id\" in post_data:\n data.properties[\"activityId\"] = post_data[\"id\"]\n if \"channelId\" in post_data:\n data.properties[\"channelId\"] = post_data[\"channelId\"]\n if \"type\" in post_data:\n data.properties[\"activityType\"] = post_data[\"type\"]\n return True\n", "path": "libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/processor/telemetry_processor.py"}]}
1,613
662
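The essence of the port above: instead of using the raw conversation id (which channels such as Teams can push past Application Insights' 64-character session-id cap), the telemetry processor stores a SHA-256 hash of it, base64-encoded. A standalone sketch using hashlib (the patch itself imports sha256 from _sha256; the oversized sample id below is made up):

```python
import base64
from hashlib import sha256


def hashed_session_id(conversation_id: str) -> str:
    # A SHA-256 digest is 32 bytes, and base64 of 32 bytes is always 44
    # characters -- safely below Application Insights' 64-character limit.
    return base64.b64encode(sha256(conversation_id.encode("utf-8")).digest()).decode()


long_teams_conversation_id = "19:meeting_" + "x" * 200  # hypothetical oversized id
print(len(long_teams_conversation_id))              # 211
print(len(hashed_session_id(long_teams_conversation_id)))  # 44
```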
gh_patches_debug_63084
rasdani/github-patches
git_diff
translate__pootle-5471
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Static page editing save HTML outcome instead of raw text To reproduce: 1. Server using Markdown for static pages and an existing static page written using Markdown 2. Minor edit the content of the static page (editing the title also seems to do this) 3. Saves fine 4. Edit the page again. Static page text is now all in HTML. 5. View static page on site. Static page is now all garbled HTML </issue> <code> [start of pootle/core/markup/fields.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 import logging 10 11 from lxml.etree import ParserError 12 from lxml.html.clean import clean_html 13 14 from django.conf import settings 15 from django.core.cache import cache 16 from django.db import models 17 from django.utils.safestring import mark_safe 18 19 from .filters import apply_markup_filter 20 from .widgets import MarkupTextarea 21 22 23 __all__ = ('Markup', 'MarkupField',) 24 25 26 logger = logging.getLogger('pootle.markup') 27 28 29 _rendered_cache_key = lambda obj, pk, field: '_%s_%s_%s_rendered' % \ 30 (obj, pk, field) 31 32 33 class Markup(object): 34 35 def __init__(self, instance, field_name, rendered_cache_key): 36 self.instance = instance 37 self.field_name = field_name 38 self.cache_key = rendered_cache_key 39 40 @property 41 def raw(self): 42 return self.instance.__dict__[self.field_name] 43 44 @raw.setter 45 def raw(self, value): 46 setattr(self.instance, self.field_name, value) 47 48 @property 49 def rendered(self): 50 rendered = cache.get(self.cache_key) 51 52 if not rendered: 53 logger.debug(u'Caching rendered output of %r', self.cache_key) 54 rendered = apply_markup_filter(self.raw) 55 cache.set(self.cache_key, rendered, 56 settings.POOTLE_CACHE_TIMEOUT) 57 58 return rendered 59 60 def __unicode__(self): 61 try: 62 return mark_safe(clean_html(self.rendered)) 63 except ParserError: 64 return u'' 65 66 def __nonzero__(self): 67 return self.raw.strip() != '' and self.raw is not None 68 69 70 class MarkupDescriptor(object): 71 72 def __init__(self, field): 73 self.field = field 74 75 def __get__(self, obj, owner): 76 if obj is None: 77 raise AttributeError('Can only be accessed via an instance.') 78 79 markup = obj.__dict__[self.field.name] 80 if markup is None: 81 return None 82 83 cache_key = _rendered_cache_key(obj.__class__.__name__, 84 obj.pk, 85 self.field.name) 86 return Markup(obj, self.field.name, cache_key) 87 88 def __set__(self, obj, value): 89 if isinstance(value, Markup): 90 obj.__dict__[self.field.name] = value.raw 91 else: 92 obj.__dict__[self.field.name] = value 93 94 95 class MarkupField(models.TextField): 96 97 description = 'Text field supporting different markup formats.' 
98 99 def contribute_to_class(self, cls, name): 100 super(MarkupField, self).contribute_to_class(cls, name) 101 setattr(cls, self.name, MarkupDescriptor(self)) 102 103 def pre_save(self, model_instance, add): 104 value = super(MarkupField, self).pre_save(model_instance, add) 105 106 if not add: 107 # Invalidate cache to force rendering upon next retrieval 108 cache_key = _rendered_cache_key(model_instance.__class__.__name__, 109 model_instance.pk, 110 self.name) 111 logger.debug('Invalidating cache for %r', cache_key) 112 cache.delete(cache_key) 113 114 return value.raw 115 116 def get_prep_value(self, value): 117 if isinstance(value, Markup): 118 return value.raw 119 120 return value 121 122 def value_to_string(self, obj): 123 value = self._get_val_from_obj(obj) 124 return self.get_prep_value(value) 125 126 def formfield(self, **kwargs): 127 defaults = {'widget': MarkupTextarea} 128 defaults.update(kwargs) 129 return super(MarkupField, self).formfield(**defaults) 130 131 def deconstruct(self): 132 name, path, args, kwargs = super(MarkupField, self).deconstruct() 133 kwargs.pop('help_text', None) 134 return name, path, args, kwargs 135 [end of pootle/core/markup/fields.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/core/markup/fields.py b/pootle/core/markup/fields.py --- a/pootle/core/markup/fields.py +++ b/pootle/core/markup/fields.py @@ -119,8 +119,11 @@ return value + def to_python(self, value): + return self.get_prep_value(value) + def value_to_string(self, obj): - value = self._get_val_from_obj(obj) + value = self.value_from_object(obj) return self.get_prep_value(value) def formfield(self, **kwargs):
{"golden_diff": "diff --git a/pootle/core/markup/fields.py b/pootle/core/markup/fields.py\n--- a/pootle/core/markup/fields.py\n+++ b/pootle/core/markup/fields.py\n@@ -119,8 +119,11 @@\n \n return value\n \n+ def to_python(self, value):\n+ return self.get_prep_value(value)\n+\n def value_to_string(self, obj):\n- value = self._get_val_from_obj(obj)\n+ value = self.value_from_object(obj)\n return self.get_prep_value(value)\n \n def formfield(self, **kwargs):\n", "issue": "Static page editing save HTML outcome instead of raw text\nTo reproduce:\r\n\r\n1. Server using Markdown for static pages and an existing static page written using Markdown\r\n2. Minor edit the content of the static page (editing the title also seems to do this)\r\n3. Saves fine\r\n4. Edit the page again. Static page text is now all in HTML. \r\n5. View static page on site. Static page is now all garbled HTML\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\n\nfrom lxml.etree import ParserError\nfrom lxml.html.clean import clean_html\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\n\nfrom .filters import apply_markup_filter\nfrom .widgets import MarkupTextarea\n\n\n__all__ = ('Markup', 'MarkupField',)\n\n\nlogger = logging.getLogger('pootle.markup')\n\n\n_rendered_cache_key = lambda obj, pk, field: '_%s_%s_%s_rendered' % \\\n (obj, pk, field)\n\n\nclass Markup(object):\n\n def __init__(self, instance, field_name, rendered_cache_key):\n self.instance = instance\n self.field_name = field_name\n self.cache_key = rendered_cache_key\n\n @property\n def raw(self):\n return self.instance.__dict__[self.field_name]\n\n @raw.setter\n def raw(self, value):\n setattr(self.instance, self.field_name, value)\n\n @property\n def rendered(self):\n rendered = cache.get(self.cache_key)\n\n if not rendered:\n logger.debug(u'Caching rendered output of %r', self.cache_key)\n rendered = apply_markup_filter(self.raw)\n cache.set(self.cache_key, rendered,\n settings.POOTLE_CACHE_TIMEOUT)\n\n return rendered\n\n def __unicode__(self):\n try:\n return mark_safe(clean_html(self.rendered))\n except ParserError:\n return u''\n\n def __nonzero__(self):\n return self.raw.strip() != '' and self.raw is not None\n\n\nclass MarkupDescriptor(object):\n\n def __init__(self, field):\n self.field = field\n\n def __get__(self, obj, owner):\n if obj is None:\n raise AttributeError('Can only be accessed via an instance.')\n\n markup = obj.__dict__[self.field.name]\n if markup is None:\n return None\n\n cache_key = _rendered_cache_key(obj.__class__.__name__,\n obj.pk,\n self.field.name)\n return Markup(obj, self.field.name, cache_key)\n\n def __set__(self, obj, value):\n if isinstance(value, Markup):\n obj.__dict__[self.field.name] = value.raw\n else:\n obj.__dict__[self.field.name] = value\n\n\nclass MarkupField(models.TextField):\n\n description = 'Text field supporting different markup formats.'\n\n def contribute_to_class(self, cls, name):\n super(MarkupField, self).contribute_to_class(cls, name)\n setattr(cls, self.name, MarkupDescriptor(self))\n\n def pre_save(self, model_instance, add):\n value = super(MarkupField, self).pre_save(model_instance, add)\n\n if not add:\n # Invalidate 
cache to force rendering upon next retrieval\n cache_key = _rendered_cache_key(model_instance.__class__.__name__,\n model_instance.pk,\n self.name)\n logger.debug('Invalidating cache for %r', cache_key)\n cache.delete(cache_key)\n\n return value.raw\n\n def get_prep_value(self, value):\n if isinstance(value, Markup):\n return value.raw\n\n return value\n\n def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)\n\n def formfield(self, **kwargs):\n defaults = {'widget': MarkupTextarea}\n defaults.update(kwargs)\n return super(MarkupField, self).formfield(**defaults)\n\n def deconstruct(self):\n name, path, args, kwargs = super(MarkupField, self).deconstruct()\n kwargs.pop('help_text', None)\n return name, path, args, kwargs\n", "path": "pootle/core/markup/fields.py"}]}
1,805
137
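The report above describes rendered HTML ending up back in the stored source after an edit; the patch's answer is to normalize to the raw text when forms convert values (to_python) and to switch value_to_string to the public value_from_object accessor. A compressed sketch of the resulting field (the Markup class here is only a stand-in for the wrapper shown in the record):

```python
from django.db import models


class Markup:
    # Minimal stand-in for the wrapper class shown in the record above.
    def __init__(self, raw):
        self.raw = raw


class MarkupField(models.TextField):
    def get_prep_value(self, value):
        # Only the raw markup source should ever reach the database.
        return value.raw if isinstance(value, Markup) else value

    def to_python(self, value):
        # Forms run to_python over initial and submitted data; unwrapping here
        # keeps rendered HTML from being round-tripped back as the source text.
        return self.get_prep_value(value)

    def value_to_string(self, obj):
        # value_from_object is the public accessor; the private
        # _get_val_from_obj used before was dropped in later Django releases.
        return self.get_prep_value(self.value_from_object(obj))
```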
gh_patches_debug_28243
rasdani/github-patches
git_diff
zestedesavoir__zds-site-2145
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> La licence est absente lors de la visualisation d'une partie du tutoriel Quand on navigue partout dans un tutoriel, on a normalement la licence de celui en haut à droite. Sauf lorsqu'on regarde une partie ([celle-ci](http://beta.zestedesavoir.com/tutoriels/335/creez-des-applications-pour-android/425/annexes/) par exemple). </issue> <code> [start of zds/utils/templatetags/topbar.py] 1 # coding: utf-8 2 3 from collections import OrderedDict 4 from django import template 5 from django.conf import settings 6 import itertools 7 from zds.forum.models import Forum, Topic 8 from zds.tutorial.models import Tutorial 9 from zds.utils.models import CategorySubCategory, Tag 10 11 12 register = template.Library() 13 14 15 @register.filter('top_categories') 16 def top_categories(user): 17 cats = {} 18 19 forums_pub = Forum.objects.filter(group__isnull=True).select_related("category").all() 20 if user and user.is_authenticated(): 21 forums_prv = Forum\ 22 .objects\ 23 .filter(group__isnull=False, group__in=user.groups.all())\ 24 .select_related("category").all() 25 forums = list(forums_pub | forums_prv) 26 else: 27 forums = list(forums_pub) 28 29 for forum in forums: 30 key = forum.category.title 31 if key in cats: 32 cats[key].append(forum) 33 else: 34 cats[key] = [forum] 35 36 tgs = Topic.objects\ 37 .values('tags', 'pk')\ 38 .distinct()\ 39 .filter(forum__in=forums, tags__isnull=False) 40 41 cts = {} 42 for key, group in itertools.groupby(tgs, lambda item: item["tags"]): 43 for thing in group: 44 if key in cts: 45 cts[key] += 1 46 else: 47 cts[key] = 1 48 49 cpt = 0 50 top_tag = [] 51 sort_list = reversed(sorted(cts.iteritems(), key=lambda k_v: (k_v[1], k_v[0]))) 52 for key, value in sort_list: 53 top_tag.append(key) 54 cpt += 1 55 if cpt >= settings.ZDS_APP['forum']['top_tag_max']: 56 break 57 58 tags = Tag.objects.filter(pk__in=top_tag) 59 60 return {"tags": tags, "categories": cats} 61 62 63 @register.filter('top_categories_tuto') 64 def top_categories_tuto(user): 65 """ 66 Get all the categories and their related subcategories 67 associed with an existing tutorial. The result is sorted 68 by alphabetic order. 69 """ 70 71 # Ordered dict is use to keep order 72 cats = OrderedDict() 73 74 subcats_tutos = Tutorial.objects.values('subcategory').filter(sha_public__isnull=False).all() 75 catsubcats = CategorySubCategory.objects \ 76 .filter(is_main=True)\ 77 .filter(subcategory__in=subcats_tutos)\ 78 .order_by('category__position', 'subcategory__title')\ 79 .select_related('subcategory', 'category')\ 80 .values('category__title', 'subcategory__title', 'subcategory__slug')\ 81 .all() 82 83 for csc in catsubcats: 84 key = csc['category__title'] 85 86 if key in cats: 87 cats[key].append((csc['subcategory__title'], csc['subcategory__slug'])) 88 else: 89 cats[key] = [(csc['subcategory__title'], csc['subcategory__slug'])] 90 91 return cats 92 93 94 @register.filter('auth_forum') 95 def auth_forum(forum, user): 96 return forum.can_read(user) 97 [end of zds/utils/templatetags/topbar.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/utils/templatetags/topbar.py b/zds/utils/templatetags/topbar.py --- a/zds/utils/templatetags/topbar.py +++ b/zds/utils/templatetags/topbar.py @@ -1,9 +1,12 @@ # coding: utf-8 from collections import OrderedDict +import itertools + from django import template from django.conf import settings -import itertools + +from zds.article.models import Article from zds.forum.models import Forum, Topic from zds.tutorial.models import Tutorial from zds.utils.models import CategorySubCategory, Tag @@ -91,6 +94,37 @@ return cats [email protected]('top_categories_article') +def top_categories_article(user): + """ + Get all the categories and their related subcategories + associed with an existing articles. The result is sorted + by alphabetic order. + """ + + # Ordered dict is use to keep order + cats = OrderedDict() + + subcats_articles = Article.objects.values('subcategory').filter(sha_public__isnull=False).all() + catsubcats = CategorySubCategory.objects \ + .filter(is_main=True)\ + .filter(subcategory__in=subcats_articles)\ + .order_by('category__position', 'subcategory__title')\ + .select_related('subcategory', 'category')\ + .values('category__title', 'subcategory__title', 'subcategory__slug')\ + .all() + + for csc in catsubcats: + key = csc['category__title'] + + if key in cats: + cats[key].append((csc['subcategory__title'], csc['subcategory__slug'])) + else: + cats[key] = [(csc['subcategory__title'], csc['subcategory__slug'])] + + return cats + + @register.filter('auth_forum') def auth_forum(forum, user): return forum.can_read(user)
{"golden_diff": "diff --git a/zds/utils/templatetags/topbar.py b/zds/utils/templatetags/topbar.py\n--- a/zds/utils/templatetags/topbar.py\n+++ b/zds/utils/templatetags/topbar.py\n@@ -1,9 +1,12 @@\n # coding: utf-8\n \n from collections import OrderedDict\n+import itertools\n+\n from django import template\n from django.conf import settings\n-import itertools\n+\n+from zds.article.models import Article\n from zds.forum.models import Forum, Topic\n from zds.tutorial.models import Tutorial\n from zds.utils.models import CategorySubCategory, Tag\n@@ -91,6 +94,37 @@\n return cats\n \n \[email protected]('top_categories_article')\n+def top_categories_article(user):\n+ \"\"\"\n+ Get all the categories and their related subcategories\n+ associed with an existing articles. The result is sorted\n+ by alphabetic order.\n+ \"\"\"\n+\n+ # Ordered dict is use to keep order\n+ cats = OrderedDict()\n+\n+ subcats_articles = Article.objects.values('subcategory').filter(sha_public__isnull=False).all()\n+ catsubcats = CategorySubCategory.objects \\\n+ .filter(is_main=True)\\\n+ .filter(subcategory__in=subcats_articles)\\\n+ .order_by('category__position', 'subcategory__title')\\\n+ .select_related('subcategory', 'category')\\\n+ .values('category__title', 'subcategory__title', 'subcategory__slug')\\\n+ .all()\n+\n+ for csc in catsubcats:\n+ key = csc['category__title']\n+\n+ if key in cats:\n+ cats[key].append((csc['subcategory__title'], csc['subcategory__slug']))\n+ else:\n+ cats[key] = [(csc['subcategory__title'], csc['subcategory__slug'])]\n+\n+ return cats\n+\n+\n @register.filter('auth_forum')\n def auth_forum(forum, user):\n return forum.can_read(user)\n", "issue": "La licence est absente lors de la visualisation d'une partie du tutoriel\nQuand on navigue partout dans un tutoriel, on a normalement la licence de celui en haut \u00e0 droite. 
Sauf lorsqu'on regarde une partie ([celle-ci](http://beta.zestedesavoir.com/tutoriels/335/creez-des-applications-pour-android/425/annexes/) par exemple).\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom collections import OrderedDict\nfrom django import template\nfrom django.conf import settings\nimport itertools\nfrom zds.forum.models import Forum, Topic\nfrom zds.tutorial.models import Tutorial\nfrom zds.utils.models import CategorySubCategory, Tag\n\n\nregister = template.Library()\n\n\[email protected]('top_categories')\ndef top_categories(user):\n cats = {}\n\n forums_pub = Forum.objects.filter(group__isnull=True).select_related(\"category\").all()\n if user and user.is_authenticated():\n forums_prv = Forum\\\n .objects\\\n .filter(group__isnull=False, group__in=user.groups.all())\\\n .select_related(\"category\").all()\n forums = list(forums_pub | forums_prv)\n else:\n forums = list(forums_pub)\n\n for forum in forums:\n key = forum.category.title\n if key in cats:\n cats[key].append(forum)\n else:\n cats[key] = [forum]\n\n tgs = Topic.objects\\\n .values('tags', 'pk')\\\n .distinct()\\\n .filter(forum__in=forums, tags__isnull=False)\n\n cts = {}\n for key, group in itertools.groupby(tgs, lambda item: item[\"tags\"]):\n for thing in group:\n if key in cts:\n cts[key] += 1\n else:\n cts[key] = 1\n\n cpt = 0\n top_tag = []\n sort_list = reversed(sorted(cts.iteritems(), key=lambda k_v: (k_v[1], k_v[0])))\n for key, value in sort_list:\n top_tag.append(key)\n cpt += 1\n if cpt >= settings.ZDS_APP['forum']['top_tag_max']:\n break\n\n tags = Tag.objects.filter(pk__in=top_tag)\n\n return {\"tags\": tags, \"categories\": cats}\n\n\[email protected]('top_categories_tuto')\ndef top_categories_tuto(user):\n \"\"\"\n Get all the categories and their related subcategories\n associed with an existing tutorial. The result is sorted\n by alphabetic order.\n \"\"\"\n\n # Ordered dict is use to keep order\n cats = OrderedDict()\n\n subcats_tutos = Tutorial.objects.values('subcategory').filter(sha_public__isnull=False).all()\n catsubcats = CategorySubCategory.objects \\\n .filter(is_main=True)\\\n .filter(subcategory__in=subcats_tutos)\\\n .order_by('category__position', 'subcategory__title')\\\n .select_related('subcategory', 'category')\\\n .values('category__title', 'subcategory__title', 'subcategory__slug')\\\n .all()\n\n for csc in catsubcats:\n key = csc['category__title']\n\n if key in cats:\n cats[key].append((csc['subcategory__title'], csc['subcategory__slug']))\n else:\n cats[key] = [(csc['subcategory__title'], csc['subcategory__slug'])]\n\n return cats\n\n\[email protected]('auth_forum')\ndef auth_forum(forum, user):\n return forum.can_read(user)\n", "path": "zds/utils/templatetags/topbar.py"}]}
1,511
447
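The new top_categories_article filter in the diff is a near copy of the existing tutorial filter; both reduce to the same grouping pattern, shown here stripped of the ORM (the sample category names are invented):

```python
from collections import OrderedDict


def group_by_category(rows):
    # rows: (category_title, subcategory_title, subcategory_slug) triples,
    # already ordered by category position and subcategory title, as the
    # values() queryset in the filter returns them.
    cats = OrderedDict()
    for category, title, slug in rows:
        cats.setdefault(category, []).append((title, slug))
    return cats


print(group_by_category([
    ("Informatique", "Android", "android"),
    ("Informatique", "Python", "python"),
    ("Autres", "Licences", "licences"),
]))
# OrderedDict([('Informatique', [('Android', 'android'), ('Python', 'python')]),
#              ('Autres', [('Licences', 'licences')])])
```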
gh_patches_debug_24481
rasdani/github-patches
git_diff
pyca__cryptography-1206
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Include OpenSSL in our Windows wheels Installing OpenSSL isn't super easy, even with the binaries for it. Maybe we should bundle it? </issue> <code> [start of cryptography/hazmat/bindings/openssl/binding.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 # implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from __future__ import absolute_import, division, print_function 15 16 import sys 17 import threading 18 19 from cryptography.hazmat.bindings.utils import build_ffi 20 21 22 _OSX_PRE_INCLUDE = """ 23 #ifdef __APPLE__ 24 #include <AvailabilityMacros.h> 25 #define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \ 26 DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 27 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 28 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 29 #endif 30 """ 31 32 _OSX_POST_INCLUDE = """ 33 #ifdef __APPLE__ 34 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 35 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \ 36 __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER 37 #endif 38 """ 39 40 41 class Binding(object): 42 """ 43 OpenSSL API wrapper. 44 """ 45 _module_prefix = "cryptography.hazmat.bindings.openssl." 46 _modules = [ 47 "aes", 48 "asn1", 49 "bignum", 50 "bio", 51 "cmac", 52 "cms", 53 "conf", 54 "crypto", 55 "dh", 56 "dsa", 57 "ec", 58 "ecdh", 59 "ecdsa", 60 "engine", 61 "err", 62 "evp", 63 "hmac", 64 "nid", 65 "objects", 66 "opensslv", 67 "osrandom_engine", 68 "pem", 69 "pkcs7", 70 "pkcs12", 71 "rand", 72 "rsa", 73 "ssl", 74 "x509", 75 "x509name", 76 "x509v3", 77 "x509_vfy" 78 ] 79 80 _locks = None 81 _lock_cb_handle = None 82 _lock_init_lock = threading.Lock() 83 84 ffi = None 85 lib = None 86 87 def __init__(self): 88 self._ensure_ffi_initialized() 89 90 @classmethod 91 def _ensure_ffi_initialized(cls): 92 if cls.ffi is not None and cls.lib is not None: 93 return 94 95 # OpenSSL goes by a different library name on different operating 96 # systems. 97 if sys.platform != "win32": 98 libraries = ["crypto", "ssl"] 99 else: # pragma: no cover 100 libraries = ["libeay32", "ssleay32", "advapi32"] 101 102 cls.ffi, cls.lib = build_ffi( 103 module_prefix=cls._module_prefix, 104 modules=cls._modules, 105 pre_include=_OSX_PRE_INCLUDE, 106 post_include=_OSX_POST_INCLUDE, 107 libraries=libraries, 108 ) 109 res = cls.lib.Cryptography_add_osrandom_engine() 110 assert res != 0 111 112 @classmethod 113 def is_available(cls): 114 # For now, OpenSSL is considered our "default" binding, so we treat it 115 # as always available. 116 return True 117 118 @classmethod 119 def init_static_locks(cls): 120 with cls._lock_init_lock: 121 cls._ensure_ffi_initialized() 122 123 if not cls._lock_cb_handle: 124 cls._lock_cb_handle = cls.ffi.callback( 125 "void(int, int, const char *, int)", 126 cls._lock_cb 127 ) 128 129 # Use Python's implementation if available, importing _ssl triggers 130 # the setup for this. 
131 __import__("_ssl") 132 133 if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL: 134 return 135 136 # If nothing else has setup a locking callback already, we set up 137 # our own 138 num_locks = cls.lib.CRYPTO_num_locks() 139 cls._locks = [threading.Lock() for n in range(num_locks)] 140 141 cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle) 142 143 @classmethod 144 def _lock_cb(cls, mode, n, file, line): 145 lock = cls._locks[n] 146 147 if mode & cls.lib.CRYPTO_LOCK: 148 lock.acquire() 149 elif mode & cls.lib.CRYPTO_UNLOCK: 150 lock.release() 151 else: 152 raise RuntimeError( 153 "Unknown lock mode {0}: lock={1}, file={2}, line={3}.".format( 154 mode, n, file, line 155 ) 156 ) 157 [end of cryptography/hazmat/bindings/openssl/binding.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py --- a/cryptography/hazmat/bindings/openssl/binding.py +++ b/cryptography/hazmat/bindings/openssl/binding.py @@ -13,6 +13,7 @@ from __future__ import absolute_import, division, print_function +import os import sys import threading @@ -97,7 +98,8 @@ if sys.platform != "win32": libraries = ["crypto", "ssl"] else: # pragma: no cover - libraries = ["libeay32", "ssleay32", "advapi32"] + link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static") + libraries = _get_windows_libraries(link_type) cls.ffi, cls.lib = build_ffi( module_prefix=cls._module_prefix, @@ -154,3 +156,15 @@ mode, n, file, line ) ) + + +def _get_windows_libraries(link_type): + if link_type == "dynamic": + return ["libeay32", "ssleay32", "advapi32"] + elif link_type == "static" or link_type == "": + return ["libeay32mt", "ssleay32mt", "advapi32", + "crypt32", "gdi32", "user32", "ws2_32"] + else: + raise ValueError( + "PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'" + )
{"golden_diff": "diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py\n--- a/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/cryptography/hazmat/bindings/openssl/binding.py\n@@ -13,6 +13,7 @@\n \n from __future__ import absolute_import, division, print_function\n \n+import os\n import sys\n import threading\n \n@@ -97,7 +98,8 @@\n if sys.platform != \"win32\":\n libraries = [\"crypto\", \"ssl\"]\n else: # pragma: no cover\n- libraries = [\"libeay32\", \"ssleay32\", \"advapi32\"]\n+ link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n+ libraries = _get_windows_libraries(link_type)\n \n cls.ffi, cls.lib = build_ffi(\n module_prefix=cls._module_prefix,\n@@ -154,3 +156,15 @@\n mode, n, file, line\n )\n )\n+\n+\n+def _get_windows_libraries(link_type):\n+ if link_type == \"dynamic\":\n+ return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n+ elif link_type == \"static\" or link_type == \"\":\n+ return [\"libeay32mt\", \"ssleay32mt\", \"advapi32\",\n+ \"crypt32\", \"gdi32\", \"user32\", \"ws2_32\"]\n+ else:\n+ raise ValueError(\n+ \"PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'\"\n+ )\n", "issue": "Include OpenSSL in our Windows wheels\nInstalling OpenSSL isn't super easy, even with the binaries for it. Maybe we should bundle it?\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\nimport threading\n\nfrom cryptography.hazmat.bindings.utils import build_ffi\n\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.openssl.\"\n _modules = [\n \"aes\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"cms\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"osrandom_engine\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\"\n ]\n\n _locks = None\n _lock_cb_handle = None\n _lock_init_lock = threading.Lock()\n\n ffi = None\n lib = None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.ffi is not None and cls.lib is not None:\n return\n\n # OpenSSL goes by a different library name on different operating\n # systems.\n if sys.platform 
!= \"win32\":\n libraries = [\"crypto\", \"ssl\"]\n else: # pragma: no cover\n libraries = [\"libeay32\", \"ssleay32\", \"advapi32\"]\n\n cls.ffi, cls.lib = build_ffi(\n module_prefix=cls._module_prefix,\n modules=cls._modules,\n pre_include=_OSX_PRE_INCLUDE,\n post_include=_OSX_POST_INCLUDE,\n libraries=libraries,\n )\n res = cls.lib.Cryptography_add_osrandom_engine()\n assert res != 0\n\n @classmethod\n def is_available(cls):\n # For now, OpenSSL is considered our \"default\" binding, so we treat it\n # as always available.\n return True\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n\n if not cls._lock_cb_handle:\n cls._lock_cb_handle = cls.ffi.callback(\n \"void(int, int, const char *, int)\",\n cls._lock_cb\n )\n\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n num_locks = cls.lib.CRYPTO_num_locks()\n cls._locks = [threading.Lock() for n in range(num_locks)]\n\n cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)\n\n @classmethod\n def _lock_cb(cls, mode, n, file, line):\n lock = cls._locks[n]\n\n if mode & cls.lib.CRYPTO_LOCK:\n lock.acquire()\n elif mode & cls.lib.CRYPTO_UNLOCK:\n lock.release()\n else:\n raise RuntimeError(\n \"Unknown lock mode {0}: lock={1}, file={2}, line={3}.\".format(\n mode, n, file, line\n )\n )\n", "path": "cryptography/hazmat/bindings/openssl/binding.py"}]}
2,015
379
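To bundle OpenSSL, the patch makes the Windows link step configurable: a PYCA_WINDOWS_LINK_TYPE environment variable chooses between the old dynamic import libraries and a static, multithreaded-runtime set, defaulting to static. The selection logic in isolation (library names taken verbatim from the diff):

```python
import os


def get_windows_libraries(link_type):
    if link_type == "dynamic":
        return ["libeay32", "ssleay32", "advapi32"]
    if link_type in ("static", ""):
        # Static OpenSSL pulls in the extra Win32 system libraries as well.
        return ["libeay32mt", "ssleay32mt", "advapi32",
                "crypt32", "gdi32", "user32", "ws2_32"]
    raise ValueError("PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'")


print(get_windows_libraries(os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")))
```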
gh_patches_debug_19067
rasdani/github-patches
git_diff
voxel51__fiftyone-157
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> fiftyone fails to import if not installed in a virtual environment Due to at least https://github.com/voxel51/fiftyone/blob/v0.1.3/fiftyone/constants.py#L32 </issue> <code> [start of fiftyone/constants.py] 1 """ 2 Package-wide constants. 3 4 | Copyright 2017-2020, Voxel51, Inc. 5 | `voxel51.com <https://voxel51.com/>`_ 6 | 7 """ 8 # pragma pylint: disable=redefined-builtin 9 # pragma pylint: disable=unused-wildcard-import 10 # pragma pylint: disable=wildcard-import 11 from __future__ import absolute_import 12 from __future__ import division 13 from __future__ import print_function 14 from __future__ import unicode_literals 15 from builtins import * 16 17 # pragma pylint: enable=redefined-builtin 18 # pragma pylint: enable=unused-wildcard-import 19 # pragma pylint: enable=wildcard-import 20 21 import os 22 import sys 23 24 try: 25 from importlib.metadata import metadata # Python 3.8 26 except ImportError: 27 from importlib_metadata import metadata # Python < 3.8 28 29 30 # Directories 31 FIFTYONE_DIR = os.path.abspath(os.path.dirname(__file__)) 32 FIFTYONE_ENV = os.environ["VIRTUAL_ENV"] 33 FIFTYONE_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".fiftyone") 34 FIFTYONE_CONFIG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "config.json") 35 BASE_DIR = os.path.dirname(FIFTYONE_DIR) 36 EXAMPLES_DIR = os.path.join(FIFTYONE_DIR, "examples") 37 if not os.path.isdir(EXAMPLES_DIR): 38 EXAMPLES_DIR = os.path.join(BASE_DIR, "examples") 39 40 # Package metadata 41 _META = metadata("fiftyone") 42 NAME = _META["name"] 43 VERSION = _META["version"] 44 DESCRIPTION = _META["summary"] 45 AUTHOR = _META["author"] 46 AUTHOR_EMAIL = _META["author-email"] 47 URL = _META["home-page"] 48 LICENSE = _META["license"] 49 VERSION_LONG = "%s v%s, %s" % (NAME, VERSION, AUTHOR) 50 51 # MongoDB setup 52 try: 53 from fiftyone.db import FIFTYONE_DB_BIN_DIR 54 except ImportError: 55 # development installation 56 FIFTYONE_DB_BIN_DIR = os.path.join(FIFTYONE_CONFIG_DIR, "bin") 57 DB_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "var/lib/mongo") 58 DB_BIN_PATH = os.path.join(FIFTYONE_DB_BIN_DIR, "mongod") 59 DB_CLIENT_BIN_PATH = os.path.join(FIFTYONE_DB_BIN_DIR, "mongo") 60 DB_LOG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "var/log/mongodb/mongo.log") 61 os.environ["PATH"] = os.pathsep.join([FIFTYONE_CONFIG_DIR, os.environ["PATH"]]) 62 START_DB = [ 63 DB_BIN_PATH, 64 "--dbpath", 65 DB_PATH, 66 "--logpath", 67 DB_LOG_PATH, 68 "--fork", 69 ] 70 STOP_DB = " ".join( 71 [ 72 DB_CLIENT_BIN_PATH, 73 "--eval", 74 '''"db.getSiblingDB('admin').shutdownServer()"''', 75 ">/dev/null 2>&1", 76 ] 77 ) 78 79 if sys.platform.startswith("linux"): 80 _STOP_SERVICE = "fuser -k %d/tcp >/dev/null 2>&1" 81 82 elif sys.platform == "darwin": 83 _STOP_SERVICE = ( 84 "lsof -i tcp:%d | grep -v PID | awk '{print $2}' | xargs kill" 85 ) 86 87 else: 88 raise OSError("Unsupported OS: %s" % sys.platform) 89 90 # Server setup 91 SERVER_DIR = os.path.join(FIFTYONE_DIR, "server") 92 SERVER_ADDR = "http://127.0.0.1:%d" 93 START_SERVER = [ 94 "gunicorn", 95 "-w", 96 "1", 97 "--worker-class", 98 "eventlet", 99 "-b", 100 "127.0.0.1:%d", 101 "main:app", 102 "--daemon", 103 "--reload", 104 ] 105 106 STOP_SERVER = _STOP_SERVICE 107 108 # App setup 109 try: 110 from fiftyone.gui import FIFTYONE_APP_DIR 111 except ImportError: 112 FIFTYONE_APP_DIR = os.path.join(FIFTYONE_DIR, "../electron") 113 [end of fiftyone/constants.py] </code> I need you to solve this issue by 
generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fiftyone/constants.py b/fiftyone/constants.py --- a/fiftyone/constants.py +++ b/fiftyone/constants.py @@ -29,7 +29,6 @@ # Directories FIFTYONE_DIR = os.path.abspath(os.path.dirname(__file__)) -FIFTYONE_ENV = os.environ["VIRTUAL_ENV"] FIFTYONE_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".fiftyone") FIFTYONE_CONFIG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "config.json") BASE_DIR = os.path.dirname(FIFTYONE_DIR) @@ -58,7 +57,6 @@ DB_BIN_PATH = os.path.join(FIFTYONE_DB_BIN_DIR, "mongod") DB_CLIENT_BIN_PATH = os.path.join(FIFTYONE_DB_BIN_DIR, "mongo") DB_LOG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "var/log/mongodb/mongo.log") -os.environ["PATH"] = os.pathsep.join([FIFTYONE_CONFIG_DIR, os.environ["PATH"]]) START_DB = [ DB_BIN_PATH, "--dbpath",
{"golden_diff": "diff --git a/fiftyone/constants.py b/fiftyone/constants.py\n--- a/fiftyone/constants.py\n+++ b/fiftyone/constants.py\n@@ -29,7 +29,6 @@\n \n # Directories\n FIFTYONE_DIR = os.path.abspath(os.path.dirname(__file__))\n-FIFTYONE_ENV = os.environ[\"VIRTUAL_ENV\"]\n FIFTYONE_CONFIG_DIR = os.path.join(os.path.expanduser(\"~\"), \".fiftyone\")\n FIFTYONE_CONFIG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, \"config.json\")\n BASE_DIR = os.path.dirname(FIFTYONE_DIR)\n@@ -58,7 +57,6 @@\n DB_BIN_PATH = os.path.join(FIFTYONE_DB_BIN_DIR, \"mongod\")\n DB_CLIENT_BIN_PATH = os.path.join(FIFTYONE_DB_BIN_DIR, \"mongo\")\n DB_LOG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, \"var/log/mongodb/mongo.log\")\n-os.environ[\"PATH\"] = os.pathsep.join([FIFTYONE_CONFIG_DIR, os.environ[\"PATH\"]])\n START_DB = [\n DB_BIN_PATH,\n \"--dbpath\",\n", "issue": "fiftyone fails to import if not installed in a virtual environment\nDue to at least https://github.com/voxel51/fiftyone/blob/v0.1.3/fiftyone/constants.py#L32\n", "before_files": [{"content": "\"\"\"\nPackage-wide constants.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nimport os\nimport sys\n\ntry:\n from importlib.metadata import metadata # Python 3.8\nexcept ImportError:\n from importlib_metadata import metadata # Python < 3.8\n\n\n# Directories\nFIFTYONE_DIR = os.path.abspath(os.path.dirname(__file__))\nFIFTYONE_ENV = os.environ[\"VIRTUAL_ENV\"]\nFIFTYONE_CONFIG_DIR = os.path.join(os.path.expanduser(\"~\"), \".fiftyone\")\nFIFTYONE_CONFIG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, \"config.json\")\nBASE_DIR = os.path.dirname(FIFTYONE_DIR)\nEXAMPLES_DIR = os.path.join(FIFTYONE_DIR, \"examples\")\nif not os.path.isdir(EXAMPLES_DIR):\n EXAMPLES_DIR = os.path.join(BASE_DIR, \"examples\")\n\n# Package metadata\n_META = metadata(\"fiftyone\")\nNAME = _META[\"name\"]\nVERSION = _META[\"version\"]\nDESCRIPTION = _META[\"summary\"]\nAUTHOR = _META[\"author\"]\nAUTHOR_EMAIL = _META[\"author-email\"]\nURL = _META[\"home-page\"]\nLICENSE = _META[\"license\"]\nVERSION_LONG = \"%s v%s, %s\" % (NAME, VERSION, AUTHOR)\n\n# MongoDB setup\ntry:\n from fiftyone.db import FIFTYONE_DB_BIN_DIR\nexcept ImportError:\n # development installation\n FIFTYONE_DB_BIN_DIR = os.path.join(FIFTYONE_CONFIG_DIR, \"bin\")\nDB_PATH = os.path.join(FIFTYONE_CONFIG_DIR, \"var/lib/mongo\")\nDB_BIN_PATH = os.path.join(FIFTYONE_DB_BIN_DIR, \"mongod\")\nDB_CLIENT_BIN_PATH = os.path.join(FIFTYONE_DB_BIN_DIR, \"mongo\")\nDB_LOG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, \"var/log/mongodb/mongo.log\")\nos.environ[\"PATH\"] = os.pathsep.join([FIFTYONE_CONFIG_DIR, os.environ[\"PATH\"]])\nSTART_DB = [\n DB_BIN_PATH,\n \"--dbpath\",\n DB_PATH,\n \"--logpath\",\n DB_LOG_PATH,\n \"--fork\",\n]\nSTOP_DB = \" \".join(\n [\n DB_CLIENT_BIN_PATH,\n \"--eval\",\n '''\"db.getSiblingDB('admin').shutdownServer()\"''',\n \">/dev/null 2>&1\",\n ]\n)\n\nif sys.platform.startswith(\"linux\"):\n _STOP_SERVICE = \"fuser -k %d/tcp >/dev/null 2>&1\"\n\nelif sys.platform == \"darwin\":\n _STOP_SERVICE = (\n \"lsof -i tcp:%d | grep -v PID | 
awk '{print $2}' | xargs kill\"\n )\n\nelse:\n raise OSError(\"Unsupported OS: %s\" % sys.platform)\n\n# Server setup\nSERVER_DIR = os.path.join(FIFTYONE_DIR, \"server\")\nSERVER_ADDR = \"http://127.0.0.1:%d\"\nSTART_SERVER = [\n \"gunicorn\",\n \"-w\",\n \"1\",\n \"--worker-class\",\n \"eventlet\",\n \"-b\",\n \"127.0.0.1:%d\",\n \"main:app\",\n \"--daemon\",\n \"--reload\",\n]\n\nSTOP_SERVER = _STOP_SERVICE\n\n# App setup\ntry:\n from fiftyone.gui import FIFTYONE_APP_DIR\nexcept ImportError:\n FIFTYONE_APP_DIR = os.path.join(FIFTYONE_DIR, \"../electron\")\n", "path": "fiftyone/constants.py"}]}
1,688
242
gh_patches_debug_14988
rasdani/github-patches
git_diff
saleor__saleor-13683
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Filter warehouses by metadata There are stores that have hundreds of warehouses, for performing updates metadata can be useful as a lookup mechanism instead of using externalReference </issue> <code> [start of saleor/graphql/warehouse/filters.py] 1 import django_filters 2 import graphene 3 from django.db.models import Exists, OuterRef, Q 4 5 from ...account.models import Address 6 from ...product.models import Product, ProductVariant 7 from ...warehouse import WarehouseClickAndCollectOption 8 from ...warehouse.models import Stock, Warehouse 9 from ..channel.types import Channel 10 from ..core.doc_category import DOC_CATEGORY_PRODUCTS 11 from ..core.filters import ( 12 EnumFilter, 13 GlobalIDMultipleChoiceFilter, 14 ListObjectTypeFilter, 15 filter_slug_list, 16 ) 17 from ..core.types import FilterInputObjectType 18 from ..utils import resolve_global_ids_to_primary_keys 19 from ..warehouse.enums import WarehouseClickAndCollectOptionEnum 20 21 22 def prefech_qs_for_filter(qs): 23 return qs.prefetch_related("address") 24 25 26 def filter_search_warehouse(qs, _, value): 27 if value: 28 addresses = Address.objects.filter( 29 Q(company_name__ilike=value) 30 | Q(street_address_1__ilike=value) 31 | Q(street_address_2__ilike=value) 32 | Q(city__ilike=value) 33 | Q(postal_code__ilike=value) 34 | Q(phone__ilike=value) 35 ).values("pk") 36 qs = qs.filter( 37 Q(name__ilike=value) 38 | Q(email__ilike=value) 39 | Q(Exists(addresses.filter(pk=OuterRef("address_id")))) 40 ) 41 return qs 42 43 44 def filter_click_and_collect_option(qs, _, value): 45 if value == WarehouseClickAndCollectOptionEnum.LOCAL.value: 46 qs = qs.filter( 47 click_and_collect_option=WarehouseClickAndCollectOption.LOCAL_STOCK 48 ) 49 elif value == WarehouseClickAndCollectOptionEnum.ALL.value: 50 qs = qs.filter( 51 click_and_collect_option=WarehouseClickAndCollectOption.ALL_WAREHOUSES 52 ) 53 elif value == WarehouseClickAndCollectOptionEnum.DISABLED.value: 54 qs = qs.filter(click_and_collect_option=WarehouseClickAndCollectOption.DISABLED) 55 return qs 56 57 58 def filter_channels(qs, _, values): 59 if values: 60 _, channels_ids = resolve_global_ids_to_primary_keys(values, Channel) 61 WarehouseChannel = Warehouse.channels.through 62 warehouse_channels = WarehouseChannel.objects.filter( 63 channel_id__in=channels_ids 64 ) 65 qs = qs.filter(Exists(warehouse_channels.filter(warehouse_id=OuterRef("id")))) 66 return qs 67 68 69 def filter_search_stock(qs, _, value): 70 if value: 71 products = Product.objects.filter(name__ilike=value).values("pk") 72 variants = ProductVariant.objects.filter( 73 Q(name__ilike=value) | Q(Exists(products.filter(pk=OuterRef("product_id")))) 74 ).values("pk") 75 addresses = Address.objects.filter(company_name__ilike=value) 76 warehouses = Warehouse.objects.filter( 77 Q(name__ilike=value) 78 | Q(Exists(addresses.filter(id=OuterRef("address_id")))) 79 ).values("pk") 80 return qs.filter( 81 Q(Exists(variants.filter(pk=OuterRef("product_variant_id")))) 82 | Q(Exists(warehouses.filter(stock=OuterRef("pk")))) 83 ) 84 return qs 85 86 87 class WarehouseFilter(django_filters.FilterSet): 88 search = django_filters.CharFilter(method=filter_search_warehouse) 89 ids = GlobalIDMultipleChoiceFilter(field_name="id") 90 is_private = django_filters.BooleanFilter(field_name="is_private") 91 click_and_collect_option = EnumFilter( 92 input_class=WarehouseClickAndCollectOptionEnum, 93 method=filter_click_and_collect_option, 94 ) 
95 channels = GlobalIDMultipleChoiceFilter(method=filter_channels) 96 slugs = ListObjectTypeFilter(input_class=graphene.String, method=filter_slug_list) 97 98 class Meta: 99 model = Warehouse 100 fields = ["click_and_collect_option"] 101 102 103 class WarehouseFilterInput(FilterInputObjectType): 104 class Meta: 105 doc_category = DOC_CATEGORY_PRODUCTS 106 filterset_class = WarehouseFilter 107 108 109 class StockFilter(django_filters.FilterSet): 110 search = django_filters.CharFilter(method=filter_search_stock) 111 112 class Meta: 113 model = Stock 114 fields = ["quantity"] 115 116 117 class StockFilterInput(FilterInputObjectType): 118 class Meta: 119 doc_category = DOC_CATEGORY_PRODUCTS 120 filterset_class = StockFilter 121 [end of saleor/graphql/warehouse/filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/graphql/warehouse/filters.py b/saleor/graphql/warehouse/filters.py --- a/saleor/graphql/warehouse/filters.py +++ b/saleor/graphql/warehouse/filters.py @@ -12,6 +12,7 @@ EnumFilter, GlobalIDMultipleChoiceFilter, ListObjectTypeFilter, + MetadataFilterBase, filter_slug_list, ) from ..core.types import FilterInputObjectType @@ -84,7 +85,7 @@ return qs -class WarehouseFilter(django_filters.FilterSet): +class WarehouseFilter(MetadataFilterBase): search = django_filters.CharFilter(method=filter_search_warehouse) ids = GlobalIDMultipleChoiceFilter(field_name="id") is_private = django_filters.BooleanFilter(field_name="is_private")
{"golden_diff": "diff --git a/saleor/graphql/warehouse/filters.py b/saleor/graphql/warehouse/filters.py\n--- a/saleor/graphql/warehouse/filters.py\n+++ b/saleor/graphql/warehouse/filters.py\n@@ -12,6 +12,7 @@\n EnumFilter,\n GlobalIDMultipleChoiceFilter,\n ListObjectTypeFilter,\n+ MetadataFilterBase,\n filter_slug_list,\n )\n from ..core.types import FilterInputObjectType\n@@ -84,7 +85,7 @@\n return qs\n \n \n-class WarehouseFilter(django_filters.FilterSet):\n+class WarehouseFilter(MetadataFilterBase):\n search = django_filters.CharFilter(method=filter_search_warehouse)\n ids = GlobalIDMultipleChoiceFilter(field_name=\"id\")\n is_private = django_filters.BooleanFilter(field_name=\"is_private\")\n", "issue": "Filter warehouses by metadata\nThere are stores that have hundreds of warehouses, for performing updates metadata can be useful as a lookup mechanism instead of using externalReference\n", "before_files": [{"content": "import django_filters\nimport graphene\nfrom django.db.models import Exists, OuterRef, Q\n\nfrom ...account.models import Address\nfrom ...product.models import Product, ProductVariant\nfrom ...warehouse import WarehouseClickAndCollectOption\nfrom ...warehouse.models import Stock, Warehouse\nfrom ..channel.types import Channel\nfrom ..core.doc_category import DOC_CATEGORY_PRODUCTS\nfrom ..core.filters import (\n EnumFilter,\n GlobalIDMultipleChoiceFilter,\n ListObjectTypeFilter,\n filter_slug_list,\n)\nfrom ..core.types import FilterInputObjectType\nfrom ..utils import resolve_global_ids_to_primary_keys\nfrom ..warehouse.enums import WarehouseClickAndCollectOptionEnum\n\n\ndef prefech_qs_for_filter(qs):\n return qs.prefetch_related(\"address\")\n\n\ndef filter_search_warehouse(qs, _, value):\n if value:\n addresses = Address.objects.filter(\n Q(company_name__ilike=value)\n | Q(street_address_1__ilike=value)\n | Q(street_address_2__ilike=value)\n | Q(city__ilike=value)\n | Q(postal_code__ilike=value)\n | Q(phone__ilike=value)\n ).values(\"pk\")\n qs = qs.filter(\n Q(name__ilike=value)\n | Q(email__ilike=value)\n | Q(Exists(addresses.filter(pk=OuterRef(\"address_id\"))))\n )\n return qs\n\n\ndef filter_click_and_collect_option(qs, _, value):\n if value == WarehouseClickAndCollectOptionEnum.LOCAL.value:\n qs = qs.filter(\n click_and_collect_option=WarehouseClickAndCollectOption.LOCAL_STOCK\n )\n elif value == WarehouseClickAndCollectOptionEnum.ALL.value:\n qs = qs.filter(\n click_and_collect_option=WarehouseClickAndCollectOption.ALL_WAREHOUSES\n )\n elif value == WarehouseClickAndCollectOptionEnum.DISABLED.value:\n qs = qs.filter(click_and_collect_option=WarehouseClickAndCollectOption.DISABLED)\n return qs\n\n\ndef filter_channels(qs, _, values):\n if values:\n _, channels_ids = resolve_global_ids_to_primary_keys(values, Channel)\n WarehouseChannel = Warehouse.channels.through\n warehouse_channels = WarehouseChannel.objects.filter(\n channel_id__in=channels_ids\n )\n qs = qs.filter(Exists(warehouse_channels.filter(warehouse_id=OuterRef(\"id\"))))\n return qs\n\n\ndef filter_search_stock(qs, _, value):\n if value:\n products = Product.objects.filter(name__ilike=value).values(\"pk\")\n variants = ProductVariant.objects.filter(\n Q(name__ilike=value) | Q(Exists(products.filter(pk=OuterRef(\"product_id\"))))\n ).values(\"pk\")\n addresses = Address.objects.filter(company_name__ilike=value)\n warehouses = Warehouse.objects.filter(\n Q(name__ilike=value)\n | Q(Exists(addresses.filter(id=OuterRef(\"address_id\"))))\n ).values(\"pk\")\n return qs.filter(\n 
Q(Exists(variants.filter(pk=OuterRef(\"product_variant_id\"))))\n | Q(Exists(warehouses.filter(stock=OuterRef(\"pk\"))))\n )\n return qs\n\n\nclass WarehouseFilter(django_filters.FilterSet):\n search = django_filters.CharFilter(method=filter_search_warehouse)\n ids = GlobalIDMultipleChoiceFilter(field_name=\"id\")\n is_private = django_filters.BooleanFilter(field_name=\"is_private\")\n click_and_collect_option = EnumFilter(\n input_class=WarehouseClickAndCollectOptionEnum,\n method=filter_click_and_collect_option,\n )\n channels = GlobalIDMultipleChoiceFilter(method=filter_channels)\n slugs = ListObjectTypeFilter(input_class=graphene.String, method=filter_slug_list)\n\n class Meta:\n model = Warehouse\n fields = [\"click_and_collect_option\"]\n\n\nclass WarehouseFilterInput(FilterInputObjectType):\n class Meta:\n doc_category = DOC_CATEGORY_PRODUCTS\n filterset_class = WarehouseFilter\n\n\nclass StockFilter(django_filters.FilterSet):\n search = django_filters.CharFilter(method=filter_search_stock)\n\n class Meta:\n model = Stock\n fields = [\"quantity\"]\n\n\nclass StockFilterInput(FilterInputObjectType):\n class Meta:\n doc_category = DOC_CATEGORY_PRODUCTS\n filterset_class = StockFilter\n", "path": "saleor/graphql/warehouse/filters.py"}]}
1,727
174
gh_patches_debug_26456
rasdani/github-patches
git_diff
deepchecks__deepchecks-956
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [FEAT][GENERAL] check for the latest version and notify if using an older version **Describe the solution you'd like** Check that the latest version is installed and raise a warning when importing the package if it's not the latest version. </issue> <code> [start of deepchecks/__init__.py] 1 # ---------------------------------------------------------------------------- 2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com) 3 # 4 # This file is part of Deepchecks. 5 # Deepchecks is distributed under the terms of the GNU Affero General 6 # Public License (version 3 or later). 7 # You should have received a copy of the GNU Affero General Public License 8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>. 9 # ---------------------------------------------------------------------------- 10 # 11 """Deepchecks.""" 12 import os 13 import pathlib 14 import warnings 15 import matplotlib 16 import plotly.io as pio 17 18 19 from deepchecks.utils.ipython import is_notebook 20 from deepchecks.tabular import ( 21 Dataset, 22 Suite, 23 Context, 24 SingleDatasetCheck, 25 TrainTestCheck, 26 ModelOnlyCheck, 27 ModelComparisonCheck, 28 ModelComparisonSuite, 29 ) 30 from deepchecks.core import ( 31 BaseCheck, 32 BaseSuite, 33 CheckResult, 34 CheckFailure, 35 SuiteResult, 36 Condition, 37 ConditionResult, 38 ConditionCategory, 39 SingleDatasetBaseCheck, 40 TrainTestBaseCheck, 41 ModelOnlyBaseCheck 42 ) 43 44 45 warnings.warn( 46 # TODO: better message 47 'Ability to import base tabular functionality from ' 48 'the `deepchecks` directly is deprecated, please import from ' 49 '`deepchecks.tabular` instead', 50 DeprecationWarning 51 ) 52 53 54 __all__ = [ 55 'BaseCheck', 56 'SingleDatasetBaseCheck', 57 'TrainTestBaseCheck', 58 'ModelOnlyBaseCheck', 59 'ModelComparisonCheck', 60 'CheckResult', 61 'CheckFailure', 62 'Condition', 63 'ConditionResult', 64 'ConditionCategory', 65 'BaseSuite', 66 'SuiteResult', 67 68 # tabular checks 69 'SingleDatasetCheck', 70 'TrainTestCheck', 71 'ModelOnlyCheck', 72 'Dataset', 73 'Suite', 74 'ModelComparisonSuite', 75 'Context' 76 ] 77 78 79 # Matplotlib has multiple backends. If we are in a context that does not support GUI (For example, during unit tests) 80 # we can't use a GUI backend. Thus we must use a non-GUI backend. 81 if not is_notebook(): 82 matplotlib.use('Agg') 83 84 # We can't rely on that the user will have an active internet connection, thus we change the default backend to 85 # "notebook" If plotly detects the 'notebook-connected' backend. 86 # for more info, see: https://plotly.com/python/renderers/ 87 pio_backends = pio.renderers.default.split('+') 88 if 'notebook_connected' in pio_backends: 89 pio_backends[pio_backends.index('notebook_connected')] = 'notebook' 90 pio.renderers.default = '+'.join(pio_backends) 91 92 93 # Set version info 94 try: 95 MODULE_DIR = pathlib.Path(__file__).absolute().parent.parent 96 with open(os.path.join(MODULE_DIR, 'VERSION'), 'r', encoding='utf-8') as f: 97 __version__ = f.read().strip() 98 except Exception: # pylint: disable=broad-except 99 # If version file can't be found, leave version empty 100 __version__ = '' 101 [end of deepchecks/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/deepchecks/__init__.py b/deepchecks/__init__.py --- a/deepchecks/__init__.py +++ b/deepchecks/__init__.py @@ -11,10 +11,11 @@ """Deepchecks.""" import os import pathlib +import http.client import warnings import matplotlib import plotly.io as pio - +from pkg_resources import parse_version from deepchecks.utils.ipython import is_notebook from deepchecks.tabular import ( @@ -95,6 +96,20 @@ MODULE_DIR = pathlib.Path(__file__).absolute().parent.parent with open(os.path.join(MODULE_DIR, 'VERSION'), 'r', encoding='utf-8') as f: __version__ = f.read().strip() -except Exception: # pylint: disable=broad-except +except: # pylint: disable=bare-except # noqa # If version file can't be found, leave version empty __version__ = '' + +# Check for latest version +try: + disable = os.environ.get('DEEPCHECKS_DISABLE_LATEST', 'false').lower() == 'true' + if not disable: + conn = http.client.HTTPSConnection('api.deepchecks.com', timeout=3) + conn.request('GET', '/latest') + response = conn.getresponse() + latest_version = response.read().decode('utf-8') + if __version__ and parse_version(__version__) < parse_version(latest_version): + warnings.warn('Looks like you are using outdated version of deepchecks. consider upgrading using' + ' pip install -U deepchecks') +except: # pylint: disable=bare-except # noqa + pass
{"golden_diff": "diff --git a/deepchecks/__init__.py b/deepchecks/__init__.py\n--- a/deepchecks/__init__.py\n+++ b/deepchecks/__init__.py\n@@ -11,10 +11,11 @@\n \"\"\"Deepchecks.\"\"\"\n import os\n import pathlib\n+import http.client\n import warnings\n import matplotlib\n import plotly.io as pio\n-\n+from pkg_resources import parse_version\n \n from deepchecks.utils.ipython import is_notebook\n from deepchecks.tabular import (\n@@ -95,6 +96,20 @@\n MODULE_DIR = pathlib.Path(__file__).absolute().parent.parent\n with open(os.path.join(MODULE_DIR, 'VERSION'), 'r', encoding='utf-8') as f:\n __version__ = f.read().strip()\n-except Exception: # pylint: disable=broad-except\n+except: # pylint: disable=bare-except # noqa\n # If version file can't be found, leave version empty\n __version__ = ''\n+\n+# Check for latest version\n+try:\n+ disable = os.environ.get('DEEPCHECKS_DISABLE_LATEST', 'false').lower() == 'true'\n+ if not disable:\n+ conn = http.client.HTTPSConnection('api.deepchecks.com', timeout=3)\n+ conn.request('GET', '/latest')\n+ response = conn.getresponse()\n+ latest_version = response.read().decode('utf-8')\n+ if __version__ and parse_version(__version__) < parse_version(latest_version):\n+ warnings.warn('Looks like you are using outdated version of deepchecks. consider upgrading using'\n+ ' pip install -U deepchecks')\n+except: # pylint: disable=bare-except # noqa\n+ pass\n", "issue": "[FEAT][GENERAL] check for the latest version and notify if using an older version\n**Describe the solution you'd like**\r\nCheck that the latest version is installed and raise a warning when importing the package if it's not the latest version.\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Deepchecks.\"\"\"\nimport os\nimport pathlib\nimport warnings\nimport matplotlib\nimport plotly.io as pio\n\n\nfrom deepchecks.utils.ipython import is_notebook\nfrom deepchecks.tabular import (\n Dataset,\n Suite,\n Context,\n SingleDatasetCheck,\n TrainTestCheck,\n ModelOnlyCheck,\n ModelComparisonCheck,\n ModelComparisonSuite,\n)\nfrom deepchecks.core import (\n BaseCheck,\n BaseSuite,\n CheckResult,\n CheckFailure,\n SuiteResult,\n Condition,\n ConditionResult,\n ConditionCategory,\n SingleDatasetBaseCheck,\n TrainTestBaseCheck,\n ModelOnlyBaseCheck\n)\n\n\nwarnings.warn(\n # TODO: better message\n 'Ability to import base tabular functionality from '\n 'the `deepchecks` directly is deprecated, please import from '\n '`deepchecks.tabular` instead',\n DeprecationWarning\n)\n\n\n__all__ = [\n 'BaseCheck',\n 'SingleDatasetBaseCheck',\n 'TrainTestBaseCheck',\n 'ModelOnlyBaseCheck',\n 'ModelComparisonCheck',\n 'CheckResult',\n 'CheckFailure',\n 'Condition',\n 'ConditionResult',\n 'ConditionCategory',\n 'BaseSuite',\n 'SuiteResult',\n\n # tabular checks\n 'SingleDatasetCheck',\n 'TrainTestCheck',\n 'ModelOnlyCheck',\n 'Dataset',\n 'Suite',\n 'ModelComparisonSuite',\n 'Context'\n]\n\n\n# Matplotlib has multiple backends. 
If we are in a context that does not support GUI (For example, during unit tests)\n# we can't use a GUI backend. Thus we must use a non-GUI backend.\nif not is_notebook():\n matplotlib.use('Agg')\n\n# We can't rely on that the user will have an active internet connection, thus we change the default backend to\n# \"notebook\" If plotly detects the 'notebook-connected' backend.\n# for more info, see: https://plotly.com/python/renderers/\npio_backends = pio.renderers.default.split('+')\nif 'notebook_connected' in pio_backends:\n pio_backends[pio_backends.index('notebook_connected')] = 'notebook'\n pio.renderers.default = '+'.join(pio_backends)\n\n\n# Set version info\ntry:\n MODULE_DIR = pathlib.Path(__file__).absolute().parent.parent\n with open(os.path.join(MODULE_DIR, 'VERSION'), 'r', encoding='utf-8') as f:\n __version__ = f.read().strip()\nexcept Exception: # pylint: disable=broad-except\n # If version file can't be found, leave version empty\n __version__ = ''\n", "path": "deepchecks/__init__.py"}]}
1,460
384
gh_patches_debug_23114
rasdani/github-patches
git_diff
mozilla__telemetry-analysis-service-441
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use `select_for_update` in cluster expiration email task There's a bit of a race condition [here](https://github.com/mozilla/telemetry-analysis-service/blob/e6fecbe12d09b2e2338ae62f5276b3b2f39b0b65/atmo/clusters/tasks.py#L38) where two tasks could ask for expiring clusters at the same time and start sending emails before they can be marked as sent. We should 1. wrap the whole task in a transaction and 2. use `select_for_update` on the query for active clusters to lock those rows. </issue> <code> [start of atmo/clusters/tasks.py] 1 # This Source Code Form is subject to the terms of the Mozilla Public 2 # License, v. 2.0. If a copy of the MPL was not distributed with this 3 # file, you can obtain one at http://mozilla.org/MPL/2.0/. 4 from datetime import timedelta 5 6 import mail_builder 7 from botocore.exceptions import ClientError 8 from celery.utils.log import get_task_logger 9 from django.conf import settings 10 from django.db import transaction 11 from django.utils import timezone 12 13 from ..celery import celery 14 from .models import Cluster 15 from .provisioners import ClusterProvisioner 16 17 logger = get_task_logger(__name__) 18 19 20 @celery.task 21 def deactivate_clusters(): 22 now = timezone.now() 23 deactivated_clusters = [] 24 for cluster in Cluster.objects.active().filter(end_date__lte=now): 25 with transaction.atomic(): 26 deactivated_clusters.append([cluster.identifier, cluster.pk]) 27 # The cluster is expired 28 logger.info( 29 'Cluster %s (%s) is expired, deactivating.', 30 cluster.pk, 31 cluster.identifier, 32 ) 33 cluster.deactivate() 34 return deactivated_clusters 35 36 37 @celery.task 38 def send_expiration_mails(): 39 deadline = timezone.now() + timedelta(hours=1) 40 soon_expired = Cluster.objects.active().filter( 41 end_date__lte=deadline, 42 expiration_mail_sent=False, 43 ) 44 for cluster in soon_expired: 45 with transaction.atomic(): 46 message = mail_builder.build_message( 47 'atmo/clusters/mails/expiration.mail', { 48 'cluster': cluster, 49 'deadline': deadline, 50 'settings': settings, 51 }, 52 ) 53 message.send() 54 cluster.expiration_mail_sent = True 55 cluster.save() 56 57 58 @celery.task(max_retries=3) 59 @celery.autoretry(ClientError) 60 def update_master_address(cluster_id, force=False): 61 """ 62 Update the public IP address for the cluster with the given cluster ID 63 """ 64 cluster = Cluster.objects.get(id=cluster_id) 65 # quick way out in case this job was called accidently 66 if cluster.master_address and not force: 67 return 68 # first get the cluster info from AWS 69 info = cluster.info 70 master_address = info.get('public_dns') or '' 71 # then store the public IP of the cluster if found in response 72 if master_address: 73 cluster.master_address = master_address 74 cluster.save() 75 return master_address 76 77 78 @celery.task(max_retries=3) 79 @celery.autoretry(ClientError) 80 def update_clusters(): 81 """ 82 Update the cluster metadata from AWS for the pending 83 clusters. 84 85 - To be used periodically. 86 - Won't update state if not needed. 87 - Will queue updating the Cluster's public IP address if needed. 88 """ 89 # only update the cluster info for clusters that are pending 90 active_clusters = Cluster.objects.active() 91 92 # Short-circuit for no active clusters (e.g. 
on weekends) 93 if not active_clusters.exists(): 94 return [] 95 96 # get the start dates of the active clusters, set to the start of the day 97 # to counteract time differences between atmo and AWS and use the oldest 98 # start date to limit the ListCluster API call to AWS 99 oldest_start_date = active_clusters.datetimes('start_date', 'day') 100 101 # build a mapping between jobflow ID and cluster info 102 cluster_mapping = {} 103 provisioner = ClusterProvisioner() 104 cluster_list = provisioner.list( 105 created_after=oldest_start_date[0] 106 ) 107 for cluster_info in cluster_list: 108 cluster_mapping[cluster_info['jobflow_id']] = cluster_info 109 110 # go through pending clusters and update the state if needed 111 updated_clusters = [] 112 for cluster in active_clusters: 113 with transaction.atomic(): 114 info = cluster_mapping.get(cluster.jobflow_id) 115 # ignore if no info was found for some reason, 116 # the cluster was deleted in AWS but it wasn't deleted here yet 117 if info is None: 118 continue 119 120 # don't update the state if it's equal to the already stored state 121 if info['state'] == cluster.most_recent_status: 122 continue 123 124 # run an UPDATE query for the cluster 125 cluster.most_recent_status = info['state'] 126 cluster.save() 127 128 updated_clusters.append(cluster.identifier) 129 130 # if not given enqueue a job to update the public IP address 131 # but only if the cluster is running or waiting, so the 132 # API call isn't wasted 133 if (not cluster.master_address and 134 cluster.most_recent_status in cluster.READY_STATUS_LIST): 135 transaction.on_commit( 136 lambda: update_master_address.delay(cluster.id) 137 ) 138 return updated_clusters 139 [end of atmo/clusters/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/atmo/clusters/tasks.py b/atmo/clusters/tasks.py --- a/atmo/clusters/tasks.py +++ b/atmo/clusters/tasks.py @@ -37,22 +37,23 @@ @celery.task def send_expiration_mails(): deadline = timezone.now() + timedelta(hours=1) - soon_expired = Cluster.objects.active().filter( - end_date__lte=deadline, - expiration_mail_sent=False, - ) - for cluster in soon_expired: - with transaction.atomic(): - message = mail_builder.build_message( - 'atmo/clusters/mails/expiration.mail', { - 'cluster': cluster, - 'deadline': deadline, - 'settings': settings, - }, - ) - message.send() - cluster.expiration_mail_sent = True - cluster.save() + with transaction.atomic(): + soon_expired = Cluster.objects.select_for_update().active().filter( + end_date__lte=deadline, + expiration_mail_sent=False, + ) + for cluster in soon_expired: + with transaction.atomic(): + message = mail_builder.build_message( + 'atmo/clusters/mails/expiration.mail', { + 'cluster': cluster, + 'deadline': deadline, + 'settings': settings, + }, + ) + message.send() + cluster.expiration_mail_sent = True + cluster.save() @celery.task(max_retries=3)
{"golden_diff": "diff --git a/atmo/clusters/tasks.py b/atmo/clusters/tasks.py\n--- a/atmo/clusters/tasks.py\n+++ b/atmo/clusters/tasks.py\n@@ -37,22 +37,23 @@\n @celery.task\n def send_expiration_mails():\n deadline = timezone.now() + timedelta(hours=1)\n- soon_expired = Cluster.objects.active().filter(\n- end_date__lte=deadline,\n- expiration_mail_sent=False,\n- )\n- for cluster in soon_expired:\n- with transaction.atomic():\n- message = mail_builder.build_message(\n- 'atmo/clusters/mails/expiration.mail', {\n- 'cluster': cluster,\n- 'deadline': deadline,\n- 'settings': settings,\n- },\n- )\n- message.send()\n- cluster.expiration_mail_sent = True\n- cluster.save()\n+ with transaction.atomic():\n+ soon_expired = Cluster.objects.select_for_update().active().filter(\n+ end_date__lte=deadline,\n+ expiration_mail_sent=False,\n+ )\n+ for cluster in soon_expired:\n+ with transaction.atomic():\n+ message = mail_builder.build_message(\n+ 'atmo/clusters/mails/expiration.mail', {\n+ 'cluster': cluster,\n+ 'deadline': deadline,\n+ 'settings': settings,\n+ },\n+ )\n+ message.send()\n+ cluster.expiration_mail_sent = True\n+ cluster.save()\n \n \n @celery.task(max_retries=3)\n", "issue": "Use `select_for_update` in cluster expiration email task\nThere's a bit of a race condition [here](https://github.com/mozilla/telemetry-analysis-service/blob/e6fecbe12d09b2e2338ae62f5276b3b2f39b0b65/atmo/clusters/tasks.py#L38) where two tasks could ask for expiring clusters at the same time and start sending emails before they can be marked as sent. We should 1. wrap the whole task in a transaction and 2. use `select_for_update` on the query for active clusters to lock those rows.\n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom datetime import timedelta\n\nimport mail_builder\nfrom botocore.exceptions import ClientError\nfrom celery.utils.log import get_task_logger\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom ..celery import celery\nfrom .models import Cluster\nfrom .provisioners import ClusterProvisioner\n\nlogger = get_task_logger(__name__)\n\n\[email protected]\ndef deactivate_clusters():\n now = timezone.now()\n deactivated_clusters = []\n for cluster in Cluster.objects.active().filter(end_date__lte=now):\n with transaction.atomic():\n deactivated_clusters.append([cluster.identifier, cluster.pk])\n # The cluster is expired\n logger.info(\n 'Cluster %s (%s) is expired, deactivating.',\n cluster.pk,\n cluster.identifier,\n )\n cluster.deactivate()\n return deactivated_clusters\n\n\[email protected]\ndef send_expiration_mails():\n deadline = timezone.now() + timedelta(hours=1)\n soon_expired = Cluster.objects.active().filter(\n end_date__lte=deadline,\n expiration_mail_sent=False,\n )\n for cluster in soon_expired:\n with transaction.atomic():\n message = mail_builder.build_message(\n 'atmo/clusters/mails/expiration.mail', {\n 'cluster': cluster,\n 'deadline': deadline,\n 'settings': settings,\n },\n )\n message.send()\n cluster.expiration_mail_sent = True\n cluster.save()\n\n\[email protected](max_retries=3)\[email protected](ClientError)\ndef update_master_address(cluster_id, force=False):\n \"\"\"\n Update the public IP address for the cluster with the given cluster ID\n \"\"\"\n cluster = Cluster.objects.get(id=cluster_id)\n # quick way out in case this job was called accidently\n if cluster.master_address and not force:\n return\n # first get the cluster info from AWS\n info = cluster.info\n master_address = info.get('public_dns') or ''\n # then store the public IP of the cluster if found in response\n if master_address:\n cluster.master_address = master_address\n cluster.save()\n return master_address\n\n\[email protected](max_retries=3)\[email protected](ClientError)\ndef update_clusters():\n \"\"\"\n Update the cluster metadata from AWS for the pending\n clusters.\n\n - To be used periodically.\n - Won't update state if not needed.\n - Will queue updating the Cluster's public IP address if needed.\n \"\"\"\n # only update the cluster info for clusters that are pending\n active_clusters = Cluster.objects.active()\n\n # Short-circuit for no active clusters (e.g. 
on weekends)\n if not active_clusters.exists():\n return []\n\n # get the start dates of the active clusters, set to the start of the day\n # to counteract time differences between atmo and AWS and use the oldest\n # start date to limit the ListCluster API call to AWS\n oldest_start_date = active_clusters.datetimes('start_date', 'day')\n\n # build a mapping between jobflow ID and cluster info\n cluster_mapping = {}\n provisioner = ClusterProvisioner()\n cluster_list = provisioner.list(\n created_after=oldest_start_date[0]\n )\n for cluster_info in cluster_list:\n cluster_mapping[cluster_info['jobflow_id']] = cluster_info\n\n # go through pending clusters and update the state if needed\n updated_clusters = []\n for cluster in active_clusters:\n with transaction.atomic():\n info = cluster_mapping.get(cluster.jobflow_id)\n # ignore if no info was found for some reason,\n # the cluster was deleted in AWS but it wasn't deleted here yet\n if info is None:\n continue\n\n # don't update the state if it's equal to the already stored state\n if info['state'] == cluster.most_recent_status:\n continue\n\n # run an UPDATE query for the cluster\n cluster.most_recent_status = info['state']\n cluster.save()\n\n updated_clusters.append(cluster.identifier)\n\n # if not given enqueue a job to update the public IP address\n # but only if the cluster is running or waiting, so the\n # API call isn't wasted\n if (not cluster.master_address and\n cluster.most_recent_status in cluster.READY_STATUS_LIST):\n transaction.on_commit(\n lambda: update_master_address.delay(cluster.id)\n )\n return updated_clusters\n", "path": "atmo/clusters/tasks.py"}]}
1,990
324
gh_patches_debug_24616
rasdani/github-patches
git_diff
streamlit__streamlit-7221
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> datetimes that are close together on silder used for ranges are illegible. ### Steps to reproduce Here's an example: ```python from datetime import date import streamlit as st dates = st.slider('date range', min_value=date(2019, 8, 1), max_value=date(2021, 6, 4), value=(date(2021, 5, 21), date(2021, 6, 4))) ``` **Expected behavior:** Legible dates and handles on the slider. **Actual behavior:** ![image](https://user-images.githubusercontent.com/632049/121186310-dcb0fe00-c85e-11eb-8b88-f201d69d51f3.png) ### Is this a regression? no ### Debug info - Streamlit version: Streamlit, version 0.81.1 - Python version: Python 3.9.4 - Using Conda? yes - OS version: Windows 10 - Browser version: Latest Chrome ### Additional information It probably makes sense to right-align the lower end of the range and left-align the upper end of the range. </issue> <code> [start of e2e/scripts/st_slider.py] 1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import streamlit as st 16 from streamlit import runtime 17 18 s1 = st.sidebar.slider("Label A", 0, 12345678, 12345678) 19 st.sidebar.write("Value A:", s1) 20 21 r1 = st.sidebar.slider("Range A", 10000, 25000, [10000, 25000]) 22 st.sidebar.write("Range Value A:", r1) 23 24 with st.sidebar.expander("Expander", expanded=True): 25 s2 = st.slider("Label B", 10000, 25000, 10000) 26 st.write("Value B:", s2) 27 28 r2 = st.slider("Range B", 10000, 25000, [10000, 25000]) 29 st.write("Range Value B:", r2) 30 31 w1 = st.slider("Label 1", 0, 100, 25, 1) 32 st.write("Value 1:", w1) 33 34 w2 = st.slider("Label 2", 0.0, 100.0, (25.0, 75.0), 0.5) 35 st.write("Value 2:", w2) 36 37 w3 = st.slider( 38 "Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label", 39 0, 40 100, 41 1, 42 1, 43 ) 44 st.write("Value 3:", w3) 45 46 w4 = st.slider("Label 4", 10000, 25000, 10000, disabled=True) 47 st.write("Value 4:", w4) 48 49 w5 = st.slider("Label 5", 0, 100, 25, 1, label_visibility="hidden") 50 st.write("Value 5:", w5) 51 52 w6 = st.slider("Label 6", 0, 100, 36, label_visibility="collapsed") 53 st.write("Value 6:", w6) 54 55 if runtime.exists(): 56 57 def on_change(): 58 st.session_state.slider_changed = True 59 60 st.slider( 61 "Label 7", 62 min_value=0, 63 max_value=100, 64 value=25, 65 step=1, 66 key="slider7", 67 on_change=on_change, 68 ) 69 st.write("Value 7:", st.session_state.slider7) 70 st.write("Slider changed:", "slider_changed" in st.session_state) 71 [end of e2e/scripts/st_slider.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/e2e/scripts/st_slider.py b/e2e/scripts/st_slider.py --- a/e2e/scripts/st_slider.py +++ b/e2e/scripts/st_slider.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from datetime import date + import streamlit as st from streamlit import runtime @@ -52,19 +54,27 @@ w6 = st.slider("Label 6", 0, 100, 36, label_visibility="collapsed") st.write("Value 6:", w6) +dates = st.slider( + "Label 7", + min_value=date(2019, 8, 1), + max_value=date(2021, 6, 4), + value=(date(2019, 8, 1), date(2019, 9, 1)), +) +st.write("Value 7:", dates[0], dates[1]) + if runtime.exists(): def on_change(): st.session_state.slider_changed = True st.slider( - "Label 7", + "Label 8", min_value=0, max_value=100, value=25, step=1, - key="slider7", + key="slider8", on_change=on_change, ) - st.write("Value 7:", st.session_state.slider7) + st.write("Value 8:", st.session_state.slider8) st.write("Slider changed:", "slider_changed" in st.session_state)
{"golden_diff": "diff --git a/e2e/scripts/st_slider.py b/e2e/scripts/st_slider.py\n--- a/e2e/scripts/st_slider.py\n+++ b/e2e/scripts/st_slider.py\n@@ -12,6 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from datetime import date\n+\n import streamlit as st\n from streamlit import runtime\n \n@@ -52,19 +54,27 @@\n w6 = st.slider(\"Label 6\", 0, 100, 36, label_visibility=\"collapsed\")\n st.write(\"Value 6:\", w6)\n \n+dates = st.slider(\n+ \"Label 7\",\n+ min_value=date(2019, 8, 1),\n+ max_value=date(2021, 6, 4),\n+ value=(date(2019, 8, 1), date(2019, 9, 1)),\n+)\n+st.write(\"Value 7:\", dates[0], dates[1])\n+\n if runtime.exists():\n \n def on_change():\n st.session_state.slider_changed = True\n \n st.slider(\n- \"Label 7\",\n+ \"Label 8\",\n min_value=0,\n max_value=100,\n value=25,\n step=1,\n- key=\"slider7\",\n+ key=\"slider8\",\n on_change=on_change,\n )\n- st.write(\"Value 7:\", st.session_state.slider7)\n+ st.write(\"Value 8:\", st.session_state.slider8)\n st.write(\"Slider changed:\", \"slider_changed\" in st.session_state)\n", "issue": "datetimes that are close together on silder used for ranges are illegible.\n### Steps to reproduce\r\n\r\nHere's an example:\r\n\r\n```python\r\n\r\nfrom datetime import date\r\nimport streamlit as st\r\n\r\ndates = st.slider('date range',\r\n min_value=date(2019, 8, 1), max_value=date(2021, 6, 4),\r\n value=(date(2021, 5, 21), date(2021, 6, 4)))\r\n```\r\n\r\n**Expected behavior:**\r\n\r\nLegible dates and handles on the slider.\r\n\r\n**Actual behavior:**\r\n\r\n![image](https://user-images.githubusercontent.com/632049/121186310-dcb0fe00-c85e-11eb-8b88-f201d69d51f3.png)\r\n\r\n### Is this a regression?\r\n\r\n no\r\n\r\n### Debug info\r\n\r\n- Streamlit version: Streamlit, version 0.81.1\r\n- Python version: Python 3.9.4\r\n- Using Conda? yes\r\n- OS version: Windows 10\r\n- Browser version: Latest Chrome\r\n\r\n### Additional information\r\n\r\nIt probably makes sense to right-align the lower end of the range and left-align the upper end of the range.\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nfrom streamlit import runtime\n\ns1 = st.sidebar.slider(\"Label A\", 0, 12345678, 12345678)\nst.sidebar.write(\"Value A:\", s1)\n\nr1 = st.sidebar.slider(\"Range A\", 10000, 25000, [10000, 25000])\nst.sidebar.write(\"Range Value A:\", r1)\n\nwith st.sidebar.expander(\"Expander\", expanded=True):\n s2 = st.slider(\"Label B\", 10000, 25000, 10000)\n st.write(\"Value B:\", s2)\n\n r2 = st.slider(\"Range B\", 10000, 25000, [10000, 25000])\n st.write(\"Range Value B:\", r2)\n\nw1 = st.slider(\"Label 1\", 0, 100, 25, 1)\nst.write(\"Value 1:\", w1)\n\nw2 = st.slider(\"Label 2\", 0.0, 100.0, (25.0, 75.0), 0.5)\nst.write(\"Value 2:\", w2)\n\nw3 = st.slider(\n \"Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label\",\n 0,\n 100,\n 1,\n 1,\n)\nst.write(\"Value 3:\", w3)\n\nw4 = st.slider(\"Label 4\", 10000, 25000, 10000, disabled=True)\nst.write(\"Value 4:\", w4)\n\nw5 = st.slider(\"Label 5\", 0, 100, 25, 1, label_visibility=\"hidden\")\nst.write(\"Value 5:\", w5)\n\nw6 = st.slider(\"Label 6\", 0, 100, 36, label_visibility=\"collapsed\")\nst.write(\"Value 6:\", w6)\n\nif runtime.exists():\n\n def on_change():\n st.session_state.slider_changed = True\n\n st.slider(\n \"Label 7\",\n min_value=0,\n max_value=100,\n value=25,\n step=1,\n key=\"slider7\",\n on_change=on_change,\n )\n st.write(\"Value 7:\", st.session_state.slider7)\n st.write(\"Slider changed:\", \"slider_changed\" in st.session_state)\n", "path": "e2e/scripts/st_slider.py"}]}
1,719
370
gh_patches_debug_8325
rasdani/github-patches
git_diff
Kinto__kinto-1101
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> __flush__ produces an HTML reponse The response for the flush endpoint is a raw HTML response from Pyramid. I suggest make it a JSON response for compatibility with the rest of the API. Maybe return `{}`? ``` gsurita-30820:kinto gsurita$ http post http://localhost:8888/v1/__flush__ HTTP/1.1 202 Accepted Content-Length: 157 Content-Type: text/html; charset=UTF-8 Date: Tue, 21 Feb 2017 13:56:56 GMT Server: waitress X-Content-Type-Options: nosniff <html> <head> <title>202 Accepted</title> </head> <body> <h1>202 Accepted</h1> The request is accepted for processing.<br/><br/> </body> </html> __flush__ produces an HTML reponse The response for the flush endpoint is a raw HTML response from Pyramid. I suggest make it a JSON response for compatibility with the rest of the API. Maybe return `{}`? ``` gsurita-30820:kinto gsurita$ http post http://localhost:8888/v1/__flush__ HTTP/1.1 202 Accepted Content-Length: 157 Content-Type: text/html; charset=UTF-8 Date: Tue, 21 Feb 2017 13:56:56 GMT Server: waitress X-Content-Type-Options: nosniff <html> <head> <title>202 Accepted</title> </head> <body> <h1>202 Accepted</h1> The request is accepted for processing.<br/><br/> </body> </html> </issue> <code> [start of kinto/views/flush.py] 1 from cornice import Service 2 from pyramid import httpexceptions 3 from pyramid.security import NO_PERMISSION_REQUIRED 4 5 from kinto.events import ServerFlushed 6 7 flush = Service(name='flush', 8 description='Clear database content', 9 path='/__flush__') 10 11 12 @flush.post(permission=NO_PERMISSION_REQUIRED) 13 def flush_post(request): 14 request.registry.storage.flush() 15 request.registry.permission.flush() 16 request.registry.cache.flush() 17 event = ServerFlushed(request) 18 request.registry.notify(event) 19 return httpexceptions.HTTPAccepted() 20 [end of kinto/views/flush.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/views/flush.py b/kinto/views/flush.py --- a/kinto/views/flush.py +++ b/kinto/views/flush.py @@ -1,5 +1,4 @@ from cornice import Service -from pyramid import httpexceptions from pyramid.security import NO_PERMISSION_REQUIRED from kinto.events import ServerFlushed @@ -16,4 +15,6 @@ request.registry.cache.flush() event = ServerFlushed(request) request.registry.notify(event) - return httpexceptions.HTTPAccepted() + + request.response.status = 202 + return {}
{"golden_diff": "diff --git a/kinto/views/flush.py b/kinto/views/flush.py\n--- a/kinto/views/flush.py\n+++ b/kinto/views/flush.py\n@@ -1,5 +1,4 @@\n from cornice import Service\n-from pyramid import httpexceptions\n from pyramid.security import NO_PERMISSION_REQUIRED\n \n from kinto.events import ServerFlushed\n@@ -16,4 +15,6 @@\n request.registry.cache.flush()\n event = ServerFlushed(request)\n request.registry.notify(event)\n- return httpexceptions.HTTPAccepted()\n+\n+ request.response.status = 202\n+ return {}\n", "issue": "__flush__ produces an HTML reponse \nThe response for the flush endpoint is a raw HTML response from Pyramid. I suggest make it a JSON response for compatibility with the rest of the API. Maybe return `{}`?\r\n\r\n```\r\ngsurita-30820:kinto gsurita$ http post http://localhost:8888/v1/__flush__\r\nHTTP/1.1 202 Accepted\r\nContent-Length: 157\r\nContent-Type: text/html; charset=UTF-8\r\nDate: Tue, 21 Feb 2017 13:56:56 GMT\r\nServer: waitress\r\nX-Content-Type-Options: nosniff\r\n\r\n<html>\r\n <head>\r\n <title>202 Accepted</title>\r\n </head>\r\n <body>\r\n <h1>202 Accepted</h1>\r\n The request is accepted for processing.<br/><br/>\r\n\r\n\r\n\r\n </body>\r\n</html>\n__flush__ produces an HTML reponse \nThe response for the flush endpoint is a raw HTML response from Pyramid. I suggest make it a JSON response for compatibility with the rest of the API. Maybe return `{}`?\r\n\r\n```\r\ngsurita-30820:kinto gsurita$ http post http://localhost:8888/v1/__flush__\r\nHTTP/1.1 202 Accepted\r\nContent-Length: 157\r\nContent-Type: text/html; charset=UTF-8\r\nDate: Tue, 21 Feb 2017 13:56:56 GMT\r\nServer: waitress\r\nX-Content-Type-Options: nosniff\r\n\r\n<html>\r\n <head>\r\n <title>202 Accepted</title>\r\n </head>\r\n <body>\r\n <h1>202 Accepted</h1>\r\n The request is accepted for processing.<br/><br/>\r\n\r\n\r\n\r\n </body>\r\n</html>\n", "before_files": [{"content": "from cornice import Service\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.events import ServerFlushed\n\nflush = Service(name='flush',\n description='Clear database content',\n path='/__flush__')\n\n\[email protected](permission=NO_PERMISSION_REQUIRED)\ndef flush_post(request):\n request.registry.storage.flush()\n request.registry.permission.flush()\n request.registry.cache.flush()\n event = ServerFlushed(request)\n request.registry.notify(event)\n return httpexceptions.HTTPAccepted()\n", "path": "kinto/views/flush.py"}]}
1,076
132
gh_patches_debug_6022
rasdani/github-patches
git_diff
tensorflow__addons-2572
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Documentation for Snake activation function is incorrect **System information** - OS Platform and Distribution (e.g., Linux Ubuntu 16.04): NA - TensorFlow version and how it was installed (source or binary): NA - TensorFlow-Addons version and how it was installed (source or binary): NA - Python version: NA - Is GPU used? (yes/no): NA **Describe the bug** Documentation for Snake activation function is incorrect. The current documentation says this: ![image](https://user-images.githubusercontent.com/40834245/133940822-d67e82ab-63e2-4445-ba96-b9c3102f6818.png) Ref: https://www.tensorflow.org/addons/api_docs/python/tfa/activations/snake But, `x` should be out of the fraction to make it consistent with the original paper as well as the code. Actual: ![image](https://user-images.githubusercontent.com/40834245/133940792-1966e5e4-fae2-4180-9492-27d489df63e4.png) Reference: https://arxiv.org/abs/2006.08195 **Code to reproduce the issue** NA **Other info / logs** NA </issue> <code> [start of tensorflow_addons/activations/snake.py] 1 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 16 import tensorflow as tf 17 18 from tensorflow_addons.utils import types 19 20 21 @tf.keras.utils.register_keras_serializable(package="Addons") 22 def snake(x: types.TensorLike, frequency: types.Number = 1) -> tf.Tensor: 23 r"""Snake activation to learn periodic functions. 24 25 Computes snake activation: 26 27 $$ 28 \mathrm{snake}(x) = \frac{x + (1 - \cos(2 \cdot \mathrm{frequency} \cdot x))}{2 \cdot \mathrm{frequency}}. 29 $$ 30 31 See [Neural Networks Fail to Learn Periodic Functions and How to Fix It](https://arxiv.org/abs/2006.08195). 32 33 Usage: 34 35 >>> x = tf.constant([-1.0, 0.0, 1.0]) 36 >>> tfa.activations.snake(x) 37 <tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.29192656, 0. , 1.7080734 ], dtype=float32)> 38 39 Args: 40 x: A `Tensor`. 41 frequency: A scalar, frequency of the periodic part. 42 Returns: 43 A `Tensor`. Has the same type as `x`. 44 """ 45 x = tf.convert_to_tensor(x) 46 frequency = tf.cast(frequency, x.dtype) 47 48 return x + (1 - tf.cos(2 * frequency * x)) / (2 * frequency) 49 [end of tensorflow_addons/activations/snake.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tensorflow_addons/activations/snake.py b/tensorflow_addons/activations/snake.py --- a/tensorflow_addons/activations/snake.py +++ b/tensorflow_addons/activations/snake.py @@ -25,7 +25,7 @@ Computes snake activation: $$ - \mathrm{snake}(x) = \frac{x + (1 - \cos(2 \cdot \mathrm{frequency} \cdot x))}{2 \cdot \mathrm{frequency}}. + \mathrm{snake}(x) = \mathrm{x} + \frac{1 - \cos(2 \cdot \mathrm{frequency} \cdot x)}{2 \cdot \mathrm{frequency}}. $$ See [Neural Networks Fail to Learn Periodic Functions and How to Fix It](https://arxiv.org/abs/2006.08195).
{"golden_diff": "diff --git a/tensorflow_addons/activations/snake.py b/tensorflow_addons/activations/snake.py\n--- a/tensorflow_addons/activations/snake.py\n+++ b/tensorflow_addons/activations/snake.py\n@@ -25,7 +25,7 @@\n Computes snake activation:\n \n $$\n- \\mathrm{snake}(x) = \\frac{x + (1 - \\cos(2 \\cdot \\mathrm{frequency} \\cdot x))}{2 \\cdot \\mathrm{frequency}}.\n+ \\mathrm{snake}(x) = \\mathrm{x} + \\frac{1 - \\cos(2 \\cdot \\mathrm{frequency} \\cdot x)}{2 \\cdot \\mathrm{frequency}}.\n $$\n \n See [Neural Networks Fail to Learn Periodic Functions and How to Fix It](https://arxiv.org/abs/2006.08195).\n", "issue": "Documentation for Snake activation function is incorrect\n**System information**\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): NA\r\n- TensorFlow version and how it was installed (source or binary): NA \r\n- TensorFlow-Addons version and how it was installed (source or binary): NA\r\n- Python version: NA\r\n- Is GPU used? (yes/no): NA\r\n\r\n**Describe the bug**\r\n\r\nDocumentation for Snake activation function is incorrect. The current documentation says this:\r\n![image](https://user-images.githubusercontent.com/40834245/133940822-d67e82ab-63e2-4445-ba96-b9c3102f6818.png)\r\n\r\nRef: https://www.tensorflow.org/addons/api_docs/python/tfa/activations/snake\r\n\r\n\r\nBut, `x` should be out of the fraction to make it consistent with the original paper as well as the code.\r\n\r\nActual: \r\n![image](https://user-images.githubusercontent.com/40834245/133940792-1966e5e4-fae2-4180-9492-27d489df63e4.png)\r\n\r\nReference: https://arxiv.org/abs/2006.08195\r\n\r\n**Code to reproduce the issue**\r\n\r\nNA\r\n\r\n**Other info / logs**\r\n\r\nNA\r\n\n", "before_files": [{"content": "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils import types\n\n\[email protected]_keras_serializable(package=\"Addons\")\ndef snake(x: types.TensorLike, frequency: types.Number = 1) -> tf.Tensor:\n r\"\"\"Snake activation to learn periodic functions.\n\n Computes snake activation:\n\n $$\n \\mathrm{snake}(x) = \\frac{x + (1 - \\cos(2 \\cdot \\mathrm{frequency} \\cdot x))}{2 \\cdot \\mathrm{frequency}}.\n $$\n\n See [Neural Networks Fail to Learn Periodic Functions and How to Fix It](https://arxiv.org/abs/2006.08195).\n\n Usage:\n\n >>> x = tf.constant([-1.0, 0.0, 1.0])\n >>> tfa.activations.snake(x)\n <tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.29192656, 0. , 1.7080734 ], dtype=float32)>\n\n Args:\n x: A `Tensor`.\n frequency: A scalar, frequency of the periodic part.\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n x = tf.convert_to_tensor(x)\n frequency = tf.cast(frequency, x.dtype)\n\n return x + (1 - tf.cos(2 * frequency * x)) / (2 * frequency)\n", "path": "tensorflow_addons/activations/snake.py"}]}
1414
206
gh_patches_debug_442
rasdani/github-patches
git_diff
saleor__saleor-5259
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Customer shouldn't be able to query draft order. Customer shouldn't be able to query draft order, by id and by token. </issue> <code> [start of saleor/graphql/order/resolvers.py] 1 import graphene 2 import graphene_django_optimizer as gql_optimizer 3 4 from ...order import OrderStatus, models 5 from ...order.events import OrderEvents 6 from ...order.models import OrderEvent 7 from ...order.utils import sum_order_totals 8 from ..utils import filter_by_period, filter_by_query_param, sort_queryset 9 from .enums import OrderStatusFilter 10 from .sorters import OrderSortField 11 from .types import Order 12 13 ORDER_SEARCH_FIELDS = ("id", "discount_name", "token", "user_email", "user__email") 14 15 16 def filter_orders(qs, info, created, status, query): 17 qs = filter_by_query_param(qs, query, ORDER_SEARCH_FIELDS) 18 19 # DEPRECATED: Will be removed in Saleor 2.11, use the `filter` field instead. 20 # filter orders by status 21 if status is not None: 22 if status == OrderStatusFilter.READY_TO_FULFILL: 23 qs = qs.ready_to_fulfill() 24 elif status == OrderStatusFilter.READY_TO_CAPTURE: 25 qs = qs.ready_to_capture() 26 27 # DEPRECATED: Will be removed in Saleor 2.11, use the `filter` field instead. 28 # filter orders by creation date 29 if created is not None: 30 qs = filter_by_period(qs, created, "created") 31 32 return gql_optimizer.query(qs, info) 33 34 35 def resolve_orders(info, created, status, query, sort_by=None): 36 qs = models.Order.objects.confirmed() 37 qs = sort_queryset(qs, sort_by, OrderSortField) 38 return filter_orders(qs, info, created, status, query) 39 40 41 def resolve_draft_orders(info, created, query, sort_by=None): 42 qs = models.Order.objects.drafts() 43 qs = sort_queryset(qs, sort_by, OrderSortField) 44 return filter_orders(qs, info, created, None, query) 45 46 47 def resolve_orders_total(_info, period): 48 qs = models.Order.objects.confirmed().exclude(status=OrderStatus.CANCELED) 49 qs = filter_by_period(qs, period, "created") 50 return sum_order_totals(qs) 51 52 53 def resolve_order(info, order_id): 54 return graphene.Node.get_node_from_global_id(info, order_id, Order) 55 56 57 def resolve_homepage_events(): 58 # Filter only selected events to be displayed on homepage. 59 types = [ 60 OrderEvents.PLACED, 61 OrderEvents.PLACED_FROM_DRAFT, 62 OrderEvents.ORDER_FULLY_PAID, 63 ] 64 return OrderEvent.objects.filter(type__in=types) 65 66 67 def resolve_order_by_token(token): 68 return models.Order.objects.filter(token=token).first() 69 [end of saleor/graphql/order/resolvers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/graphql/order/resolvers.py b/saleor/graphql/order/resolvers.py --- a/saleor/graphql/order/resolvers.py +++ b/saleor/graphql/order/resolvers.py @@ -65,4 +65,8 @@ def resolve_order_by_token(token): - return models.Order.objects.filter(token=token).first() + return ( + models.Order.objects.exclude(status=OrderStatus.DRAFT) + .filter(token=token) + .first() + )
{"golden_diff": "diff --git a/saleor/graphql/order/resolvers.py b/saleor/graphql/order/resolvers.py\n--- a/saleor/graphql/order/resolvers.py\n+++ b/saleor/graphql/order/resolvers.py\n@@ -65,4 +65,8 @@\n \n \n def resolve_order_by_token(token):\n- return models.Order.objects.filter(token=token).first()\n+ return (\n+ models.Order.objects.exclude(status=OrderStatus.DRAFT)\n+ .filter(token=token)\n+ .first()\n+ )\n", "issue": "Customer shouldn't be able to query draft order. \nCustomer shouldn't be able to query draft order, by id and by token. \n", "before_files": [{"content": "import graphene\nimport graphene_django_optimizer as gql_optimizer\n\nfrom ...order import OrderStatus, models\nfrom ...order.events import OrderEvents\nfrom ...order.models import OrderEvent\nfrom ...order.utils import sum_order_totals\nfrom ..utils import filter_by_period, filter_by_query_param, sort_queryset\nfrom .enums import OrderStatusFilter\nfrom .sorters import OrderSortField\nfrom .types import Order\n\nORDER_SEARCH_FIELDS = (\"id\", \"discount_name\", \"token\", \"user_email\", \"user__email\")\n\n\ndef filter_orders(qs, info, created, status, query):\n qs = filter_by_query_param(qs, query, ORDER_SEARCH_FIELDS)\n\n # DEPRECATED: Will be removed in Saleor 2.11, use the `filter` field instead.\n # filter orders by status\n if status is not None:\n if status == OrderStatusFilter.READY_TO_FULFILL:\n qs = qs.ready_to_fulfill()\n elif status == OrderStatusFilter.READY_TO_CAPTURE:\n qs = qs.ready_to_capture()\n\n # DEPRECATED: Will be removed in Saleor 2.11, use the `filter` field instead.\n # filter orders by creation date\n if created is not None:\n qs = filter_by_period(qs, created, \"created\")\n\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_orders(info, created, status, query, sort_by=None):\n qs = models.Order.objects.confirmed()\n qs = sort_queryset(qs, sort_by, OrderSortField)\n return filter_orders(qs, info, created, status, query)\n\n\ndef resolve_draft_orders(info, created, query, sort_by=None):\n qs = models.Order.objects.drafts()\n qs = sort_queryset(qs, sort_by, OrderSortField)\n return filter_orders(qs, info, created, None, query)\n\n\ndef resolve_orders_total(_info, period):\n qs = models.Order.objects.confirmed().exclude(status=OrderStatus.CANCELED)\n qs = filter_by_period(qs, period, \"created\")\n return sum_order_totals(qs)\n\n\ndef resolve_order(info, order_id):\n return graphene.Node.get_node_from_global_id(info, order_id, Order)\n\n\ndef resolve_homepage_events():\n # Filter only selected events to be displayed on homepage.\n types = [\n OrderEvents.PLACED,\n OrderEvents.PLACED_FROM_DRAFT,\n OrderEvents.ORDER_FULLY_PAID,\n ]\n return OrderEvent.objects.filter(type__in=types)\n\n\ndef resolve_order_by_token(token):\n return models.Order.objects.filter(token=token).first()\n", "path": "saleor/graphql/order/resolvers.py"}]}
1262
111
gh_patches_debug_14594
rasdani/github-patches
git_diff
microsoft__Qcodes-54
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> version requirement of dependencies When I try installing Qcodes with `python setup.py develop` I get an error: ``` Traceback (most recent call last): File ".\setup.py", line 81, in <module> if StrictVersion(module.__version__) < StrictVersion(min_version): File "C:\anaconda\lib\distutils\version.py", line 40, in __init__ self.parse(vstring) File "C:\anaconda\lib\distutils\version.py", line 137, in parse raise ValueError("invalid version number '%s'" % vstring) ValueError: invalid version number 'pyqtgraph-0.9.10-100-gf224936' ``` So I have a specific branch of pyqtgraph installed that has a somehow funny version number, but it is larger than the required `0.9.10`. Is there any way to make this work without swiching the pyqtgraph branch and then switching it back? </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 from distutils.version import StrictVersion 3 from importlib import import_module 4 5 6 def readme(): 7 with open('README.md') as f: 8 return f.read() 9 10 extras = { 11 'MatPlot': ('matplotlib', '1.5'), 12 'QtPlot': ('pyqtgraph', '0.9.10'), 13 'coverage tests': ('coverage', '4.0') 14 } 15 extras_require = {k: '>='.join(v) for k, v in extras.items()} 16 17 setup(name='qcodes', 18 version='0.1.0', 19 use_2to3=False, 20 author='Alex Johnson', 21 author_email='[email protected]', 22 maintainer='Alex Johnson', 23 maintainer_email='[email protected]', 24 description='Python-based data acquisition framework developed by the ' 25 'Copenhagen / Delft / Sydney / Microsoft quantum computing ' 26 'consortium', 27 long_description=readme(), 28 url='https://github.com/qdev-dk/Qcodes', 29 classifiers=[ 30 'Development Status :: 3 - Alpha', 31 'Intended Audience :: Science/Research', 32 'Programming Language :: Python :: 3 :: Only', 33 'Programming Language :: Python :: 3.3', 34 'Programming Language :: Python :: 3.4', 35 'Programming Language :: Python :: 3.5', 36 'Topic :: Scientific/Engineering' 37 ], 38 license='Private', 39 # if we want to install without tests: 40 # packages=find_packages(exclude=["*.tests", "tests"]), 41 packages=find_packages(), 42 package_data={'qcodes': ['widgets/*.js', 'widgets/*.css']}, 43 install_requires=[ 44 'numpy>=1.10', 45 'pyvisa>=1.8', 46 'IPython>=4.0', 47 'ipywidgets>=4.1', 48 # nose and coverage are only for tests, but we'd like to encourage 49 # people to run tests! 50 # coverage has a problem with setuptools on Windows, moved to extras 51 'nose>=1.3' 52 ], 53 test_suite='qcodes.tests', 54 extras_require=extras_require, 55 # I think the only part of qcodes that would care about zip_safe 56 # is utils.helpers.reload_code; users of a zip-installed package 57 # shouldn't be needing to do this anyway, but we should test first. 58 zip_safe=False) 59 60 version_template = ''' 61 ***** 62 ***** package {0} must be at least version {1}. 
63 ***** Please upgrade it (pip install -U {0} or conda install {0}) 64 ***** in order to use {2} 65 ***** 66 ''' 67 68 missing_template = ''' 69 ***** 70 ***** package {0} not found 71 ***** Please install it (pip install {0} or conda install {0}) 72 ***** in order to use {1} 73 ***** 74 ''' 75 76 # now test the versions of extras 77 for extra, (module_name, min_version) in extras.items(): 78 try: 79 module = import_module(module_name) 80 if StrictVersion(module.__version__) < StrictVersion(min_version): 81 print(version_template.format(module_name, min_version, extra)) 82 except ImportError: 83 print(missing_template.format(module_name, extra)) 84 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -73,6 +73,15 @@ ***** ''' +valueerror_template = ''' +***** +***** package {0} version not understood +***** Please make sure the installed version ({1}) +***** is compatible with the minimum required version ({2}) +***** in order to use {3} +***** +''' + # now test the versions of extras for extra, (module_name, min_version) in extras.items(): try: @@ -81,3 +90,5 @@ print(version_template.format(module_name, min_version, extra)) except ImportError: print(missing_template.format(module_name, extra)) + except ValueError: + print(valueerror_template.format(module_name, module.__version__, min_version, extra))
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -73,6 +73,15 @@\n *****\n '''\n \n+valueerror_template = '''\n+*****\n+***** package {0} version not understood\n+***** Please make sure the installed version ({1})\n+***** is compatible with the minimum required version ({2})\n+***** in order to use {3}\n+*****\n+'''\n+\n # now test the versions of extras\n for extra, (module_name, min_version) in extras.items():\n try:\n@@ -81,3 +90,5 @@\n print(version_template.format(module_name, min_version, extra))\n except ImportError:\n print(missing_template.format(module_name, extra))\n+ except ValueError:\n+ print(valueerror_template.format(module_name, module.__version__, min_version, extra))\n", "issue": "version requirement of dependencies\nWhen I try installing Qcodes with `python setup.py develop` I get an error:\n\n```\nTraceback (most recent call last):\n File \".\\setup.py\", line 81, in <module>\n if StrictVersion(module.__version__) < StrictVersion(min_version):\n File \"C:\\anaconda\\lib\\distutils\\version.py\", line 40, in __init__\n self.parse(vstring)\n File \"C:\\anaconda\\lib\\distutils\\version.py\", line 137, in parse\n raise ValueError(\"invalid version number '%s'\" % vstring)\nValueError: invalid version number 'pyqtgraph-0.9.10-100-gf224936'\n```\n\nSo I have a specific branch of pyqtgraph installed that has a somehow funny version number, but it is larger than the required `0.9.10`.\n\nIs there any way to make this work without swiching the pyqtgraph branch and then switching it back?\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom distutils.version import StrictVersion\nfrom importlib import import_module\n\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\nextras = {\n 'MatPlot': ('matplotlib', '1.5'),\n 'QtPlot': ('pyqtgraph', '0.9.10'),\n 'coverage tests': ('coverage', '4.0')\n}\nextras_require = {k: '>='.join(v) for k, v in extras.items()}\n\nsetup(name='qcodes',\n version='0.1.0',\n use_2to3=False,\n author='Alex Johnson',\n author_email='[email protected]',\n maintainer='Alex Johnson',\n maintainer_email='[email protected]',\n description='Python-based data acquisition framework developed by the '\n 'Copenhagen / Delft / Sydney / Microsoft quantum computing '\n 'consortium',\n long_description=readme(),\n url='https://github.com/qdev-dk/Qcodes',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering'\n ],\n license='Private',\n # if we want to install without tests:\n # packages=find_packages(exclude=[\"*.tests\", \"tests\"]),\n packages=find_packages(),\n package_data={'qcodes': ['widgets/*.js', 'widgets/*.css']},\n install_requires=[\n 'numpy>=1.10',\n 'pyvisa>=1.8',\n 'IPython>=4.0',\n 'ipywidgets>=4.1',\n # nose and coverage are only for tests, but we'd like to encourage\n # people to run tests!\n # coverage has a problem with setuptools on Windows, moved to extras\n 'nose>=1.3'\n ],\n test_suite='qcodes.tests',\n extras_require=extras_require,\n # I think the only part of qcodes that would care about zip_safe\n # is utils.helpers.reload_code; users of a zip-installed package\n # shouldn't be needing to do this anyway, but we should test first.\n zip_safe=False)\n\nversion_template = '''\n*****\n***** package {0} must be 
at least version {1}.\n***** Please upgrade it (pip install -U {0} or conda install {0})\n***** in order to use {2}\n*****\n'''\n\nmissing_template = '''\n*****\n***** package {0} not found\n***** Please install it (pip install {0} or conda install {0})\n***** in order to use {1}\n*****\n'''\n\n# now test the versions of extras\nfor extra, (module_name, min_version) in extras.items():\n try:\n module = import_module(module_name)\n if StrictVersion(module.__version__) < StrictVersion(min_version):\n print(version_template.format(module_name, min_version, extra))\n except ImportError:\n print(missing_template.format(module_name, extra))\n", "path": "setup.py"}]}
1598
182
gh_patches_debug_12417
rasdani/github-patches
git_diff
sanic-org__sanic-666
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> __main__ top-level script shadowing ImportError Given `main.py`: ```python import nonexistent # assuming this is... non-existent from sanic import Sanic, response app = Sanic(__name__) @app.route('/') async def index(request): return response.html('<p>Hello</p>') ``` When we try to import something non-existent, exception `ImportError` will be thrown, [line 38 of __main__.py](https://github.com/channelcat/sanic/blob/5fd62098bd2f2722876a0873d5856d70046d3889/sanic/__main__.py#L38) does not preserve the exception, reporting preset message (what's provided on commandline) instead. This is what we will get: ``` python -m sanic main.app No module named main found. Example File: project/sanic_server.py -> app Example Module: project.sanic_server.app ``` It is very hard to find the true cause if the import statement that failed is, let's say, buried three levels of modules deep, for example. </issue> <code> [start of sanic/__main__.py] 1 from argparse import ArgumentParser 2 from importlib import import_module 3 4 from sanic.log import log 5 from sanic.app import Sanic 6 7 if __name__ == "__main__": 8 parser = ArgumentParser(prog='sanic') 9 parser.add_argument('--host', dest='host', type=str, default='127.0.0.1') 10 parser.add_argument('--port', dest='port', type=int, default=8000) 11 parser.add_argument('--cert', dest='cert', type=str, 12 help='location of certificate for SSL') 13 parser.add_argument('--key', dest='key', type=str, 14 help='location of keyfile for SSL.') 15 parser.add_argument('--workers', dest='workers', type=int, default=1, ) 16 parser.add_argument('--debug', dest='debug', action="store_true") 17 parser.add_argument('module') 18 args = parser.parse_args() 19 20 try: 21 module_parts = args.module.split(".") 22 module_name = ".".join(module_parts[:-1]) 23 app_name = module_parts[-1] 24 25 module = import_module(module_name) 26 app = getattr(module, app_name, None) 27 if not isinstance(app, Sanic): 28 raise ValueError("Module is not a Sanic app, it is a {}. " 29 "Perhaps you meant {}.app?" 30 .format(type(app).__name__, args.module)) 31 if args.cert is not None or args.key is not None: 32 ssl = {'cert': args.cert, 'key': args.key} 33 else: 34 ssl = None 35 36 app.run(host=args.host, port=args.port, 37 workers=args.workers, debug=args.debug, ssl=ssl) 38 except ImportError: 39 log.error("No module named {} found.\n" 40 " Example File: project/sanic_server.py -> app\n" 41 " Example Module: project.sanic_server.app" 42 .format(module_name)) 43 except ValueError as e: 44 log.error("{}".format(e)) 45 [end of sanic/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sanic/__main__.py b/sanic/__main__.py --- a/sanic/__main__.py +++ b/sanic/__main__.py @@ -35,10 +35,10 @@ app.run(host=args.host, port=args.port, workers=args.workers, debug=args.debug, ssl=ssl) - except ImportError: + except ImportError as e: log.error("No module named {} found.\n" " Example File: project/sanic_server.py -> app\n" " Example Module: project.sanic_server.app" - .format(module_name)) + .format(e.name)) except ValueError as e: log.error("{}".format(e))
{"golden_diff": "diff --git a/sanic/__main__.py b/sanic/__main__.py\n--- a/sanic/__main__.py\n+++ b/sanic/__main__.py\n@@ -35,10 +35,10 @@\n \n app.run(host=args.host, port=args.port,\n workers=args.workers, debug=args.debug, ssl=ssl)\n- except ImportError:\n+ except ImportError as e:\n log.error(\"No module named {} found.\\n\"\n \" Example File: project/sanic_server.py -> app\\n\"\n \" Example Module: project.sanic_server.app\"\n- .format(module_name))\n+ .format(e.name))\n except ValueError as e:\n log.error(\"{}\".format(e))\n", "issue": "__main__ top-level script shadowing ImportError\nGiven `main.py`:\r\n```python\r\nimport nonexistent # assuming this is... non-existent\r\nfrom sanic import Sanic, response\r\n\r\n\r\napp = Sanic(__name__)\r\n\r\n\r\[email protected]('/')\r\nasync def index(request):\r\n return response.html('<p>Hello</p>')\r\n```\r\n\r\nWhen we try to import something non-existent, exception `ImportError` will be thrown,\r\n[line 38 of __main__.py](https://github.com/channelcat/sanic/blob/5fd62098bd2f2722876a0873d5856d70046d3889/sanic/__main__.py#L38) does not preserve the exception, reporting preset message (what's provided on commandline) instead. This is what we will get:\r\n\r\n```\r\npython -m sanic main.app\r\nNo module named main found.\r\n Example File: project/sanic_server.py -> app\r\n Example Module: project.sanic_server.app\r\n```\r\n\r\nIt is very hard to find the true cause if the import statement that failed is, let's say, buried three levels of modules deep, for example.\n", "before_files": [{"content": "from argparse import ArgumentParser\nfrom importlib import import_module\n\nfrom sanic.log import log\nfrom sanic.app import Sanic\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(prog='sanic')\n parser.add_argument('--host', dest='host', type=str, default='127.0.0.1')\n parser.add_argument('--port', dest='port', type=int, default=8000)\n parser.add_argument('--cert', dest='cert', type=str,\n help='location of certificate for SSL')\n parser.add_argument('--key', dest='key', type=str,\n help='location of keyfile for SSL.')\n parser.add_argument('--workers', dest='workers', type=int, default=1, )\n parser.add_argument('--debug', dest='debug', action=\"store_true\")\n parser.add_argument('module')\n args = parser.parse_args()\n\n try:\n module_parts = args.module.split(\".\")\n module_name = \".\".join(module_parts[:-1])\n app_name = module_parts[-1]\n\n module = import_module(module_name)\n app = getattr(module, app_name, None)\n if not isinstance(app, Sanic):\n raise ValueError(\"Module is not a Sanic app, it is a {}. \"\n \"Perhaps you meant {}.app?\"\n .format(type(app).__name__, args.module))\n if args.cert is not None or args.key is not None:\n ssl = {'cert': args.cert, 'key': args.key}\n else:\n ssl = None\n\n app.run(host=args.host, port=args.port,\n workers=args.workers, debug=args.debug, ssl=ssl)\n except ImportError:\n log.error(\"No module named {} found.\\n\"\n \" Example File: project/sanic_server.py -> app\\n\"\n \" Example Module: project.sanic_server.app\"\n .format(module_name))\n except ValueError as e:\n log.error(\"{}\".format(e))\n", "path": "sanic/__main__.py"}]}
1290
156
gh_patches_debug_10871
rasdani/github-patches
git_diff
nilearn__nilearn-2140
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> test_mask_reducer failing on circle CI testing the mask reducer from nilearn.decompositions.base sometimes fails on circleCI, e.g. https://travis-ci.org/nilearn/nilearn/builds/583674961?utm_source=github_status&utm_medium=notification the process gets a SIGABRT could be due to the recent unvendoring of joblib, although this error started more recently so far I haven't been able to reproduce this locally </issue> <code> [start of nilearn/__init__.py] 1 """ 2 Machine Learning module for NeuroImaging in python 3 -------------------------------------------------- 4 5 Documentation is available in the docstrings and online at 6 http://nilearn.github.io. 7 8 Contents 9 -------- 10 Nilearn aims at simplifying the use of the scikit-learn package in the context of 11 neuroimaging. It provides specific input/output functions, algorithms and 12 visualization tools. 13 14 Submodules 15 --------- 16 datasets --- Utilities to download NeuroImaging datasets 17 decoding --- Decoding tools and algorithms 18 decomposition --- Includes a subject level variant of the ICA 19 algorithm called Canonical ICA 20 connectome --- Set of tools for computing functional connectivity matrices 21 and for sparse multi-subjects learning of Gaussian graphical models 22 image --- Set of functions defining mathematical operations 23 working on Niimg-like objects 24 input_data --- includes scikit-learn tranformers and tools to 25 preprocess neuro-imaging data 26 masking --- Utilities to compute and operate on brain masks 27 mass_univariate --- Defines a Massively Univariate Linear Model 28 estimated with OLS and permutation test 29 plotting --- Plotting code for nilearn 30 region --- Set of functions for extracting region-defined 31 signals, clustering methods, connected regions extraction 32 signal --- Set of preprocessing functions for time series 33 """ 34 35 import gzip 36 import sys 37 import warnings 38 39 from distutils.version import LooseVersion 40 41 from .version import _check_module_dependencies, __version__ 42 43 44 def _py2_deprecation_warning(): 45 py2_warning = ('Python2 support is deprecated and will be removed in ' 46 'the next release. Consider switching to Python 3.6 or 3.7.' 47 ) 48 warnings.filterwarnings('once', message=py2_warning) 49 warnings.warn(message=py2_warning, 50 category=DeprecationWarning, 51 stacklevel=3, 52 ) 53 54 def _py34_deprecation_warning(): 55 py34_warning = ('Python 3.4 support is deprecated and will be removed in ' 56 'the next release. Consider switching to Python 3.6 or 3.7.' 57 ) 58 warnings.filterwarnings('once', message=py34_warning) 59 warnings.warn(message=py34_warning, 60 category=DeprecationWarning, 61 stacklevel=3, 62 ) 63 64 65 def _python_deprecation_warnings(): 66 if sys.version_info.major == 2: 67 _py2_deprecation_warning() 68 elif sys.version_info.major == 3 and sys.version_info.minor == 4: 69 _py34_deprecation_warning() 70 71 72 _check_module_dependencies() 73 _python_deprecation_warnings() 74 75 # Temporary work around to address formatting issues in doc tests 76 # with NumPy 1.14. NumPy had made more consistent str/repr formatting 77 # of numpy arrays. Hence we print the options to old versions. 
78 import numpy as np 79 if LooseVersion(np.__version__) >= LooseVersion("1.14"): 80 # See issue #1600 in nilearn for reason to add try and except 81 try: 82 from ._utils.testing import is_nose_running 83 if is_nose_running(): 84 np.set_printoptions(legacy='1.13') 85 except ImportError: 86 pass 87 88 # Monkey-patch gzip to have faster reads on large gzip files 89 if hasattr(gzip.GzipFile, 'max_read_chunk'): 90 gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb 91 92 # Boolean controlling the default globbing technique when using check_niimg 93 # and the os.path.expanduser usage in CacheMixin. 94 # Default value it True, set it to False to completely deactivate this 95 # behavior. 96 EXPAND_PATH_WILDCARDS = True 97 98 # Boolean controlling whether the joblib caches should be 99 # flushed if the version of certain modules changes (eg nibabel, as it 100 # does not respect the backward compatibility in some of its internal 101 # structures 102 # This is used in nilearn._utils.cache_mixin 103 CHECK_CACHE_VERSION = True 104 105 # list all submodules available in nilearn and version 106 __all__ = ['datasets', 'decoding', 'decomposition', 'connectome', 107 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', 108 'region', 'signal', 'surface', 'parcellations', '__version__'] 109 110 [end of nilearn/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nilearn/__init__.py b/nilearn/__init__.py --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -35,11 +35,17 @@ import gzip import sys import warnings +import os from distutils.version import LooseVersion from .version import _check_module_dependencies, __version__ +# Workaround issue discovered in intel-openmp 2019.5: +# https://github.com/ContinuumIO/anaconda-issues/issues/11294 +# +# see also https://github.com/scikit-learn/scikit-learn/pull/15020 +os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") def _py2_deprecation_warning(): py2_warning = ('Python2 support is deprecated and will be removed in '
{"golden_diff": "diff --git a/nilearn/__init__.py b/nilearn/__init__.py\n--- a/nilearn/__init__.py\n+++ b/nilearn/__init__.py\n@@ -35,11 +35,17 @@\n import gzip\n import sys\n import warnings\n+import os\n \n from distutils.version import LooseVersion\n \n from .version import _check_module_dependencies, __version__\n \n+# Workaround issue discovered in intel-openmp 2019.5:\n+# https://github.com/ContinuumIO/anaconda-issues/issues/11294\n+#\n+# see also https://github.com/scikit-learn/scikit-learn/pull/15020\n+os.environ.setdefault(\"KMP_INIT_AT_FORK\", \"FALSE\")\n \n def _py2_deprecation_warning():\n py2_warning = ('Python2 support is deprecated and will be removed in '\n", "issue": "test_mask_reducer failing on circle CI\ntesting the mask reducer from nilearn.decompositions.base sometimes fails on circleCI, e.g.\r\nhttps://travis-ci.org/nilearn/nilearn/builds/583674961?utm_source=github_status&utm_medium=notification\r\n\r\nthe process gets a SIGABRT\r\n\r\ncould be due to the recent unvendoring of joblib, although this error started more recently\r\n\r\nso far I haven't been able to reproduce this locally\n", "before_files": [{"content": "\"\"\"\nMachine Learning module for NeuroImaging in python\n--------------------------------------------------\n\nDocumentation is available in the docstrings and online at\nhttp://nilearn.github.io.\n\nContents\n--------\nNilearn aims at simplifying the use of the scikit-learn package in the context of\nneuroimaging. It provides specific input/output functions, algorithms and\nvisualization tools.\n\nSubmodules\n---------\ndatasets --- Utilities to download NeuroImaging datasets\ndecoding --- Decoding tools and algorithms\ndecomposition --- Includes a subject level variant of the ICA\n algorithm called Canonical ICA\nconnectome --- Set of tools for computing functional connectivity matrices\n and for sparse multi-subjects learning of Gaussian graphical models\nimage --- Set of functions defining mathematical operations\n working on Niimg-like objects\ninput_data --- includes scikit-learn tranformers and tools to\n preprocess neuro-imaging data\nmasking --- Utilities to compute and operate on brain masks\nmass_univariate --- Defines a Massively Univariate Linear Model\n estimated with OLS and permutation test\nplotting --- Plotting code for nilearn\nregion --- Set of functions for extracting region-defined\n signals, clustering methods, connected regions extraction\nsignal --- Set of preprocessing functions for time series\n\"\"\"\n\nimport gzip\nimport sys\nimport warnings\n\nfrom distutils.version import LooseVersion\n\nfrom .version import _check_module_dependencies, __version__\n\n\ndef _py2_deprecation_warning():\n py2_warning = ('Python2 support is deprecated and will be removed in '\n 'the next release. Consider switching to Python 3.6 or 3.7.'\n )\n warnings.filterwarnings('once', message=py2_warning)\n warnings.warn(message=py2_warning,\n category=DeprecationWarning,\n stacklevel=3,\n )\n\ndef _py34_deprecation_warning():\n py34_warning = ('Python 3.4 support is deprecated and will be removed in '\n 'the next release. 
Consider switching to Python 3.6 or 3.7.'\n )\n warnings.filterwarnings('once', message=py34_warning)\n warnings.warn(message=py34_warning,\n category=DeprecationWarning,\n stacklevel=3,\n )\n\n\ndef _python_deprecation_warnings():\n if sys.version_info.major == 2:\n _py2_deprecation_warning()\n elif sys.version_info.major == 3 and sys.version_info.minor == 4:\n _py34_deprecation_warning()\n\n\n_check_module_dependencies()\n_python_deprecation_warnings()\n\n# Temporary work around to address formatting issues in doc tests\n# with NumPy 1.14. NumPy had made more consistent str/repr formatting\n# of numpy arrays. Hence we print the options to old versions.\nimport numpy as np\nif LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n # See issue #1600 in nilearn for reason to add try and except\n try:\n from ._utils.testing import is_nose_running\n if is_nose_running():\n np.set_printoptions(legacy='1.13')\n except ImportError:\n pass\n\n# Monkey-patch gzip to have faster reads on large gzip files\nif hasattr(gzip.GzipFile, 'max_read_chunk'):\n gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb\n\n# Boolean controlling the default globbing technique when using check_niimg\n# and the os.path.expanduser usage in CacheMixin.\n# Default value it True, set it to False to completely deactivate this\n# behavior.\nEXPAND_PATH_WILDCARDS = True\n\n# Boolean controlling whether the joblib caches should be\n# flushed if the version of certain modules changes (eg nibabel, as it\n# does not respect the backward compatibility in some of its internal\n# structures\n# This is used in nilearn._utils.cache_mixin\nCHECK_CACHE_VERSION = True\n\n# list all submodules available in nilearn and version\n__all__ = ['datasets', 'decoding', 'decomposition', 'connectome',\n 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',\n 'region', 'signal', 'surface', 'parcellations', '__version__']\n\n", "path": "nilearn/__init__.py"}]}
1803
195
gh_patches_debug_23916
rasdani/github-patches
git_diff
pypa__virtualenv-2220
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `--prompt .` : the dot causes the basename of the current directory to be used **What's the problem this feature will solve?** <!-- What are you trying to do, that you are unable to achieve with pip as it currently stands? --> I'd like an easy way to have the prompt reflect the directory the env was created in. **Describe the solution you'd like** <!-- Clear and concise description of what you want to happen. --> I'd like `--prompt .` to use the basename of the current directory for the prompt, similar to what venv does. More specifically, the prompt should be the directory in parentheses. <!-- Provide examples of real world use cases that this would enable and how it solves the problem described above. --> ```bash $ mkdir foo $ cd foo $ virtualenv venv --prompt . $ source venv/bin/activate (foo) $ ``` **Alternative Solutions** <!-- Have you tried to workaround the problem using pip or other tools? Or a different approach to solving this issue? Please elaborate here. --> We could change the default prompt to be this. that'd be cool. **Additional context** <!-- Add any other context, links, etc. about the feature here. --> This as been in the venv implementation of Pyhon since Python 3.9.something. Python 3 documentation: https://docs.python.org/3/library/venv.html#api > prompt – a String to be used after virtual environment is activated (defaults to None which means directory name of the environment would be used). _If the special string "." is provided, the basename of the current directory is used as the prompt._ </issue> <code> [start of src/virtualenv/run/plugin/activators.py] 1 from __future__ import absolute_import, unicode_literals 2 3 from argparse import ArgumentTypeError 4 from collections import OrderedDict 5 6 from .base import ComponentBuilder 7 8 9 class ActivationSelector(ComponentBuilder): 10 def __init__(self, interpreter, parser): 11 self.default = None 12 possible = OrderedDict( 13 (k, v) for k, v in self.options("virtualenv.activate").items() if v.supports(interpreter) 14 ) 15 super(ActivationSelector, self).__init__(interpreter, parser, "activators", possible) 16 self.parser.description = "options for activation scripts" 17 self.active = None 18 19 def add_selector_arg_parse(self, name, choices): 20 self.default = ",".join(choices) 21 self.parser.add_argument( 22 "--{}".format(name), 23 default=self.default, 24 metavar="comma_sep_list", 25 required=False, 26 help="activators to generate - default is all supported", 27 type=self._extract_activators, 28 ) 29 30 def _extract_activators(self, entered_str): 31 elements = [e.strip() for e in entered_str.split(",") if e.strip()] 32 missing = [e for e in elements if e not in self.possible] 33 if missing: 34 raise ArgumentTypeError("the following activators are not available {}".format(",".join(missing))) 35 return elements 36 37 def handle_selected_arg_parse(self, options): 38 selected_activators = ( 39 self._extract_activators(self.default) if options.activators is self.default else options.activators 40 ) 41 self.active = {k: v for k, v in self.possible.items() if k in selected_activators} 42 self.parser.add_argument( 43 "--prompt", 44 dest="prompt", 45 metavar="prompt", 46 help="provides an alternative prompt prefix for this environment", 47 default=None, 48 ) 49 for activator in self.active.values(): 50 activator.add_parser_arguments(self.parser, self.interpreter) 51 52 def create(self, options): 53 return 
[activator_class(options) for activator_class in self.active.values()] 54 [end of src/virtualenv/run/plugin/activators.py] [start of src/virtualenv/activation/activator.py] 1 from __future__ import absolute_import, unicode_literals 2 3 from abc import ABCMeta, abstractmethod 4 5 from six import add_metaclass 6 7 8 @add_metaclass(ABCMeta) 9 class Activator(object): 10 """Generates an activate script for the virtual environment""" 11 12 def __init__(self, options): 13 """Create a new activator generator. 14 15 :param options: the parsed options as defined within :meth:`add_parser_arguments` 16 """ 17 self.flag_prompt = options.prompt 18 19 @classmethod 20 def supports(cls, interpreter): 21 """Check if the activation script is supported in the given interpreter. 22 23 :param interpreter: the interpreter we need to support 24 :return: ``True`` if supported, ``False`` otherwise 25 """ 26 return True 27 28 @classmethod 29 def add_parser_arguments(cls, parser, interpreter): 30 """ 31 Add CLI arguments for this activation script. 32 33 :param parser: the CLI parser 34 :param interpreter: the interpreter this virtual environment is based of 35 """ 36 37 @abstractmethod 38 def generate(self, creator): 39 """Generate the activate script for the given creator. 40 41 :param creator: the creator (based of :class:`virtualenv.create.creator.Creator`) we used to create this \ 42 virtual environment 43 """ 44 raise NotImplementedError 45 [end of src/virtualenv/activation/activator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/virtualenv/activation/activator.py b/src/virtualenv/activation/activator.py --- a/src/virtualenv/activation/activator.py +++ b/src/virtualenv/activation/activator.py @@ -1,5 +1,6 @@ from __future__ import absolute_import, unicode_literals +import os from abc import ABCMeta, abstractmethod from six import add_metaclass @@ -14,7 +15,7 @@ :param options: the parsed options as defined within :meth:`add_parser_arguments` """ - self.flag_prompt = options.prompt + self.flag_prompt = os.path.basename(os.getcwd()) if options.prompt == "." else options.prompt @classmethod def supports(cls, interpreter): diff --git a/src/virtualenv/run/plugin/activators.py b/src/virtualenv/run/plugin/activators.py --- a/src/virtualenv/run/plugin/activators.py +++ b/src/virtualenv/run/plugin/activators.py @@ -43,7 +43,10 @@ "--prompt", dest="prompt", metavar="prompt", - help="provides an alternative prompt prefix for this environment", + help=( + "provides an alternative prompt prefix for this environment " + "(value of . means name of the current working directory)" + ), default=None, ) for activator in self.active.values():
{"golden_diff": "diff --git a/src/virtualenv/activation/activator.py b/src/virtualenv/activation/activator.py\n--- a/src/virtualenv/activation/activator.py\n+++ b/src/virtualenv/activation/activator.py\n@@ -1,5 +1,6 @@\n from __future__ import absolute_import, unicode_literals\n \n+import os\n from abc import ABCMeta, abstractmethod\n \n from six import add_metaclass\n@@ -14,7 +15,7 @@\n \n :param options: the parsed options as defined within :meth:`add_parser_arguments`\n \"\"\"\n- self.flag_prompt = options.prompt\n+ self.flag_prompt = os.path.basename(os.getcwd()) if options.prompt == \".\" else options.prompt\n \n @classmethod\n def supports(cls, interpreter):\ndiff --git a/src/virtualenv/run/plugin/activators.py b/src/virtualenv/run/plugin/activators.py\n--- a/src/virtualenv/run/plugin/activators.py\n+++ b/src/virtualenv/run/plugin/activators.py\n@@ -43,7 +43,10 @@\n \"--prompt\",\n dest=\"prompt\",\n metavar=\"prompt\",\n- help=\"provides an alternative prompt prefix for this environment\",\n+ help=(\n+ \"provides an alternative prompt prefix for this environment \"\n+ \"(value of . means name of the current working directory)\"\n+ ),\n default=None,\n )\n for activator in self.active.values():\n", "issue": "`--prompt .` : the dot causes the basename of the current directory to be used\n**What's the problem this feature will solve?**\r\n<!-- What are you trying to do, that you are unable to achieve with pip as it currently stands? -->\r\nI'd like an easy way to have the prompt reflect the directory the env was created in.\r\n\r\n**Describe the solution you'd like**\r\n<!-- Clear and concise description of what you want to happen. -->\r\nI'd like `--prompt .` to use the basename of the current directory for the prompt, similar to what venv does. \r\nMore specifically, the prompt should be the directory in parentheses. \r\n\r\n<!-- Provide examples of real world use cases that this would enable and how it solves the problem described above. -->\r\n```bash\r\n$ mkdir foo\r\n$ cd foo\r\n$ virtualenv venv --prompt .\r\n$ source venv/bin/activate\r\n(foo) $ \r\n```\r\n\r\n**Alternative Solutions**\r\n<!-- Have you tried to workaround the problem using pip or other tools? Or a different approach to solving this issue? Please elaborate here. -->\r\nWe could change the default prompt to be this. that'd be cool. \r\n\r\n**Additional context**\r\n<!-- Add any other context, links, etc. about the feature here. -->\r\nThis as been in the venv implementation of Pyhon since Python 3.9.something.\r\n\r\nPython 3 documentation: https://docs.python.org/3/library/venv.html#api \r\n\r\n> prompt \u2013 a String to be used after virtual environment is activated (defaults to None which means directory name of the environment would be used). 
_If the special string \".\" is provided, the basename of the current directory is used as the prompt._\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom argparse import ArgumentTypeError\nfrom collections import OrderedDict\n\nfrom .base import ComponentBuilder\n\n\nclass ActivationSelector(ComponentBuilder):\n def __init__(self, interpreter, parser):\n self.default = None\n possible = OrderedDict(\n (k, v) for k, v in self.options(\"virtualenv.activate\").items() if v.supports(interpreter)\n )\n super(ActivationSelector, self).__init__(interpreter, parser, \"activators\", possible)\n self.parser.description = \"options for activation scripts\"\n self.active = None\n\n def add_selector_arg_parse(self, name, choices):\n self.default = \",\".join(choices)\n self.parser.add_argument(\n \"--{}\".format(name),\n default=self.default,\n metavar=\"comma_sep_list\",\n required=False,\n help=\"activators to generate - default is all supported\",\n type=self._extract_activators,\n )\n\n def _extract_activators(self, entered_str):\n elements = [e.strip() for e in entered_str.split(\",\") if e.strip()]\n missing = [e for e in elements if e not in self.possible]\n if missing:\n raise ArgumentTypeError(\"the following activators are not available {}\".format(\",\".join(missing)))\n return elements\n\n def handle_selected_arg_parse(self, options):\n selected_activators = (\n self._extract_activators(self.default) if options.activators is self.default else options.activators\n )\n self.active = {k: v for k, v in self.possible.items() if k in selected_activators}\n self.parser.add_argument(\n \"--prompt\",\n dest=\"prompt\",\n metavar=\"prompt\",\n help=\"provides an alternative prompt prefix for this environment\",\n default=None,\n )\n for activator in self.active.values():\n activator.add_parser_arguments(self.parser, self.interpreter)\n\n def create(self, options):\n return [activator_class(options) for activator_class in self.active.values()]\n", "path": "src/virtualenv/run/plugin/activators.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\n\n@add_metaclass(ABCMeta)\nclass Activator(object):\n \"\"\"Generates an activate script for the virtual environment\"\"\"\n\n def __init__(self, options):\n \"\"\"Create a new activator generator.\n\n :param options: the parsed options as defined within :meth:`add_parser_arguments`\n \"\"\"\n self.flag_prompt = options.prompt\n\n @classmethod\n def supports(cls, interpreter):\n \"\"\"Check if the activation script is supported in the given interpreter.\n\n :param interpreter: the interpreter we need to support\n :return: ``True`` if supported, ``False`` otherwise\n \"\"\"\n return True\n\n @classmethod\n def add_parser_arguments(cls, parser, interpreter):\n \"\"\"\n Add CLI arguments for this activation script.\n\n :param parser: the CLI parser\n :param interpreter: the interpreter this virtual environment is based of\n \"\"\"\n\n @abstractmethod\n def generate(self, creator):\n \"\"\"Generate the activate script for the given creator.\n\n :param creator: the creator (based of :class:`virtualenv.create.creator.Creator`) we used to create this \\\n virtual environment\n \"\"\"\n raise NotImplementedError\n", "path": "src/virtualenv/activation/activator.py"}]}
1804
310
gh_patches_debug_28922
rasdani/github-patches
git_diff
bridgecrewio__checkov-3612
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Outdated check for `google_container_cluster` binary authorization **Describe the issue** `Check: CKV_GCP_66: "Ensure use of Binary Authorization"` is not checking [against the updated property](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#nested_binary_authorization) causing the check to fail. The current property states that the old method [`enable_binary_authorization`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#enable_binary_authorization) is deprecated in favor the new [`binary_authorization`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#nested_binary_authorization) block **Examples** Before: ```hcl enable_binary_authorization = true ``` Now: ```hcl binary_authorization { evaluation_mode = "PROJECT_SINGLETON_POLICY_ENFORCE" } ``` **Version (please complete the following information):** - Checkov Version 2.1.149 </issue> <code> [start of checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py] 1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 2 from checkov.common.models.enums import CheckCategories 3 4 5 class GKEBinaryAuthorization(BaseResourceValueCheck): 6 def __init__(self) -> None: 7 name = "Ensure use of Binary Authorization" 8 id = "CKV_GCP_66" 9 supported_resources = ("google_container_cluster",) 10 categories = (CheckCategories.KUBERNETES,) 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def get_inspected_key(self) -> str: 14 return "enable_binary_authorization" 15 16 17 check = GKEBinaryAuthorization() 18 [end of checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py b/checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py --- a/checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py +++ b/checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py @@ -1,17 +1,29 @@ -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck -from checkov.common.models.enums import CheckCategories +from checkov.common.models.enums import CheckCategories, CheckResult +from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck -class GKEBinaryAuthorization(BaseResourceValueCheck): - def __init__(self) -> None: +class GKEBinaryAuthorization(BaseResourceCheck): + def __init__(self): name = "Ensure use of Binary Authorization" id = "CKV_GCP_66" - supported_resources = ("google_container_cluster",) - categories = (CheckCategories.KUBERNETES,) + supported_resources = ['google_container_cluster'] + categories = [CheckCategories.KUBERNETES] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def get_inspected_key(self) -> str: - return "enable_binary_authorization" + def scan_resource_conf(self, conf): + if 'binary_authorization' in conf.keys(): + binary_authorization = conf["binary_authorization"][0] + if isinstance(binary_authorization, dict) and 'evaluation_mode' in binary_authorization: + # Google provider version >= v4.31.0 + if binary_authorization.get("evaluation_mode") == ["PROJECT_SINGLETON_POLICY_ENFORCE"]: + return CheckResult.PASSED + # Google provider version v4.29.0 and v4.30.0 + elif binary_authorization.get("evaluation_mode") == [True]: + return CheckResult.PASSED + # Google provider version <= v4.28.0 + if conf.get("enable_binary_authorization") == [True]: + return CheckResult.PASSED + return CheckResult.FAILED check = GKEBinaryAuthorization()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py b/checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py\n--- a/checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py\n+++ b/checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py\n@@ -1,17 +1,29 @@\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n \n-class GKEBinaryAuthorization(BaseResourceValueCheck):\n- def __init__(self) -> None:\n+class GKEBinaryAuthorization(BaseResourceCheck):\n+ def __init__(self):\n name = \"Ensure use of Binary Authorization\"\n id = \"CKV_GCP_66\"\n- supported_resources = (\"google_container_cluster\",)\n- categories = (CheckCategories.KUBERNETES,)\n+ supported_resources = ['google_container_cluster']\n+ categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self) -> str:\n- return \"enable_binary_authorization\"\n+ def scan_resource_conf(self, conf):\n+ if 'binary_authorization' in conf.keys():\n+ binary_authorization = conf[\"binary_authorization\"][0]\n+ if isinstance(binary_authorization, dict) and 'evaluation_mode' in binary_authorization:\n+ # Google provider version >= v4.31.0\n+ if binary_authorization.get(\"evaluation_mode\") == [\"PROJECT_SINGLETON_POLICY_ENFORCE\"]:\n+ return CheckResult.PASSED\n+ # Google provider version v4.29.0 and v4.30.0\n+ elif binary_authorization.get(\"evaluation_mode\") == [True]:\n+ return CheckResult.PASSED\n+ # Google provider version <= v4.28.0\n+ if conf.get(\"enable_binary_authorization\") == [True]:\n+ return CheckResult.PASSED\n+ return CheckResult.FAILED\n \n \n check = GKEBinaryAuthorization()\n", "issue": "Outdated check for `google_container_cluster` binary authorization\n**Describe the issue**\r\n`Check: CKV_GCP_66: \"Ensure use of Binary Authorization\"` is not checking [against the updated property](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#nested_binary_authorization) causing the check to fail.\r\n\r\nThe current property states that the old method [`enable_binary_authorization`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#enable_binary_authorization) is deprecated in favor the new [`binary_authorization`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#nested_binary_authorization) block\r\n\r\n**Examples**\r\n\r\nBefore:\r\n```hcl\r\nenable_binary_authorization = true\r\n```\r\n\r\nNow:\r\n```hcl\r\n binary_authorization {\r\n evaluation_mode = \"PROJECT_SINGLETON_POLICY_ENFORCE\"\r\n }\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.1.149\r\n\r\n\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass GKEBinaryAuthorization(BaseResourceValueCheck):\n def __init__(self) -> None:\n name = \"Ensure use of Binary Authorization\"\n id = \"CKV_GCP_66\"\n supported_resources = (\"google_container_cluster\",)\n categories = (CheckCategories.KUBERNETES,)\n 
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self) -> str:\n return \"enable_binary_authorization\"\n\n\ncheck = GKEBinaryAuthorization()\n", "path": "checkov/terraform/checks/resource/gcp/GKEBinaryAuthorization.py"}]}
939
494
gh_patches_debug_42433
rasdani/github-patches
git_diff
quantopian__zipline-463
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support for nans in input data If a ticker hasn't started trading yet, pandas' convention is to use nans. Currently it seems as if zipline raises an exception if there are nans in the input data. This should be fixed and the corresponding ticker to should not be included if it hasn't started trading yet. </issue> <code> [start of zipline/sources/data_frame_source.py] 1 2 # 3 # Copyright 2013 Quantopian, Inc. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 """ 18 Tools to generate data sources. 19 """ 20 import pandas as pd 21 22 from zipline.gens.utils import hash_args 23 24 from zipline.sources.data_source import DataSource 25 26 27 class DataFrameSource(DataSource): 28 """ 29 Yields all events in event_list that match the given sid_filter. 30 If no event_list is specified, generates an internal stream of events 31 to filter. Returns all events if filter is None. 32 33 Configuration options: 34 35 sids : list of values representing simulated internal sids 36 start : start date 37 delta : timedelta between internal events 38 filter : filter to remove the sids 39 """ 40 41 def __init__(self, data, **kwargs): 42 assert isinstance(data.index, pd.tseries.index.DatetimeIndex) 43 44 self.data = data 45 # Unpack config dictionary with default values. 46 self.sids = kwargs.get('sids', data.columns) 47 self.start = kwargs.get('start', data.index[0]) 48 self.end = kwargs.get('end', data.index[-1]) 49 50 # Hash_value for downstream sorting. 51 self.arg_string = hash_args(data, **kwargs) 52 53 self._raw_data = None 54 55 @property 56 def mapping(self): 57 return { 58 'dt': (lambda x: x, 'dt'), 59 'sid': (lambda x: x, 'sid'), 60 'price': (float, 'price'), 61 'volume': (int, 'volume'), 62 } 63 64 @property 65 def instance_hash(self): 66 return self.arg_string 67 68 def raw_data_gen(self): 69 for dt, series in self.data.iterrows(): 70 for sid, price in series.iteritems(): 71 if sid in self.sids: 72 event = { 73 'dt': dt, 74 'sid': sid, 75 'price': price, 76 # Just chose something large 77 # if no volume available. 78 'volume': 1e9, 79 } 80 yield event 81 82 @property 83 def raw_data(self): 84 if not self._raw_data: 85 self._raw_data = self.raw_data_gen() 86 return self._raw_data 87 88 89 class DataPanelSource(DataSource): 90 """ 91 Yields all events in event_list that match the given sid_filter. 92 If no event_list is specified, generates an internal stream of events 93 to filter. Returns all events if filter is None. 94 95 Configuration options: 96 97 sids : list of values representing simulated internal sids 98 start : start date 99 delta : timedelta between internal events 100 filter : filter to remove the sids 101 """ 102 103 def __init__(self, data, **kwargs): 104 assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex) 105 106 self.data = data 107 # Unpack config dictionary with default values. 
108 self.sids = kwargs.get('sids', data.items) 109 self.start = kwargs.get('start', data.major_axis[0]) 110 self.end = kwargs.get('end', data.major_axis[-1]) 111 112 # Hash_value for downstream sorting. 113 self.arg_string = hash_args(data, **kwargs) 114 115 self._raw_data = None 116 117 @property 118 def mapping(self): 119 mapping = { 120 'dt': (lambda x: x, 'dt'), 121 'sid': (lambda x: x, 'sid'), 122 'price': (float, 'price'), 123 'volume': (int, 'volume'), 124 } 125 126 # Add additional fields. 127 for field_name in self.data.minor_axis: 128 if field_name in ['price', 'volume', 'dt', 'sid']: 129 continue 130 mapping[field_name] = (lambda x: x, field_name) 131 132 return mapping 133 134 @property 135 def instance_hash(self): 136 return self.arg_string 137 138 def raw_data_gen(self): 139 for dt in self.data.major_axis: 140 df = self.data.major_xs(dt) 141 for sid, series in df.iteritems(): 142 if sid in self.sids: 143 event = { 144 'dt': dt, 145 'sid': sid, 146 } 147 for field_name, value in series.iteritems(): 148 event[field_name] = value 149 150 yield event 151 152 @property 153 def raw_data(self): 154 if not self._raw_data: 155 self._raw_data = self.raw_data_gen() 156 return self._raw_data 157 [end of zipline/sources/data_frame_source.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zipline/sources/data_frame_source.py b/zipline/sources/data_frame_source.py --- a/zipline/sources/data_frame_source.py +++ b/zipline/sources/data_frame_source.py @@ -1,6 +1,5 @@ - # -# Copyright 2013 Quantopian, Inc. +# Copyright 2015 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,6 +16,7 @@ """ Tools to generate data sources. """ +import numpy as np import pandas as pd from zipline.gens.utils import hash_args @@ -26,16 +26,14 @@ class DataFrameSource(DataSource): """ - Yields all events in event_list that match the given sid_filter. - If no event_list is specified, generates an internal stream of events - to filter. Returns all events if filter is None. + Data source that yields from a pandas DataFrame. - Configuration options: + :Axis layout: + * columns : sids + * index : datetime - sids : list of values representing simulated internal sids - start : start date - delta : timedelta between internal events - filter : filter to remove the sids + :Note: + Bars where the price is nan are filtered out. """ def __init__(self, data, **kwargs): @@ -52,6 +50,8 @@ self._raw_data = None + self.started_sids = set() + @property def mapping(self): return { @@ -69,6 +69,12 @@ for dt, series in self.data.iterrows(): for sid, price in series.iteritems(): if sid in self.sids: + # Skip SIDs that can not be forward filled + if np.isnan(price) and \ + sid not in self.started_sids: + continue + self.started_sids.add(sid) + event = { 'dt': dt, 'sid': sid, @@ -88,16 +94,15 @@ class DataPanelSource(DataSource): """ - Yields all events in event_list that match the given sid_filter. - If no event_list is specified, generates an internal stream of events - to filter. Returns all events if filter is None. + Data source that yields from a pandas Panel. - Configuration options: + :Axis layout: + * items : sids + * major_axis : datetime + * minor_axis : price, volume, ... - sids : list of values representing simulated internal sids - start : start date - delta : timedelta between internal events - filter : filter to remove the sids + :Note: + Bars where the price is nan are filtered out. """ def __init__(self, data, **kwargs): @@ -114,6 +119,8 @@ self._raw_data = None + self.started_sids = set() + @property def mapping(self): mapping = { @@ -140,6 +147,12 @@ df = self.data.major_xs(dt) for sid, series in df.iteritems(): if sid in self.sids: + # Skip SIDs that can not be forward filled + if np.isnan(series['price']) and \ + sid not in self.started_sids: + continue + self.started_sids.add(sid) + event = { 'dt': dt, 'sid': sid,
{"golden_diff": "diff --git a/zipline/sources/data_frame_source.py b/zipline/sources/data_frame_source.py\n--- a/zipline/sources/data_frame_source.py\n+++ b/zipline/sources/data_frame_source.py\n@@ -1,6 +1,5 @@\n-\n #\n-# Copyright 2013 Quantopian, Inc.\n+# Copyright 2015 Quantopian, Inc.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -17,6 +16,7 @@\n \"\"\"\n Tools to generate data sources.\n \"\"\"\n+import numpy as np\n import pandas as pd\n \n from zipline.gens.utils import hash_args\n@@ -26,16 +26,14 @@\n \n class DataFrameSource(DataSource):\n \"\"\"\n- Yields all events in event_list that match the given sid_filter.\n- If no event_list is specified, generates an internal stream of events\n- to filter. Returns all events if filter is None.\n+ Data source that yields from a pandas DataFrame.\n \n- Configuration options:\n+ :Axis layout:\n+ * columns : sids\n+ * index : datetime\n \n- sids : list of values representing simulated internal sids\n- start : start date\n- delta : timedelta between internal events\n- filter : filter to remove the sids\n+ :Note:\n+ Bars where the price is nan are filtered out.\n \"\"\"\n \n def __init__(self, data, **kwargs):\n@@ -52,6 +50,8 @@\n \n self._raw_data = None\n \n+ self.started_sids = set()\n+\n @property\n def mapping(self):\n return {\n@@ -69,6 +69,12 @@\n for dt, series in self.data.iterrows():\n for sid, price in series.iteritems():\n if sid in self.sids:\n+ # Skip SIDs that can not be forward filled\n+ if np.isnan(price) and \\\n+ sid not in self.started_sids:\n+ continue\n+ self.started_sids.add(sid)\n+\n event = {\n 'dt': dt,\n 'sid': sid,\n@@ -88,16 +94,15 @@\n \n class DataPanelSource(DataSource):\n \"\"\"\n- Yields all events in event_list that match the given sid_filter.\n- If no event_list is specified, generates an internal stream of events\n- to filter. Returns all events if filter is None.\n+ Data source that yields from a pandas Panel.\n \n- Configuration options:\n+ :Axis layout:\n+ * items : sids\n+ * major_axis : datetime\n+ * minor_axis : price, volume, ...\n \n- sids : list of values representing simulated internal sids\n- start : start date\n- delta : timedelta between internal events\n- filter : filter to remove the sids\n+ :Note:\n+ Bars where the price is nan are filtered out.\n \"\"\"\n \n def __init__(self, data, **kwargs):\n@@ -114,6 +119,8 @@\n \n self._raw_data = None\n \n+ self.started_sids = set()\n+\n @property\n def mapping(self):\n mapping = {\n@@ -140,6 +147,12 @@\n df = self.data.major_xs(dt)\n for sid, series in df.iteritems():\n if sid in self.sids:\n+ # Skip SIDs that can not be forward filled\n+ if np.isnan(series['price']) and \\\n+ sid not in self.started_sids:\n+ continue\n+ self.started_sids.add(sid)\n+\n event = {\n 'dt': dt,\n 'sid': sid,\n", "issue": "Support for nans in input data\nIf a ticker hasn't started trading yet, pandas' convention is to use nans. Currently it seems as if zipline raises an exception if there are nans in the input data. 
This should be fixed and the corresponding ticker to should not be included if it hasn't started trading yet.\n\n", "before_files": [{"content": "\n#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTools to generate data sources.\n\"\"\"\nimport pandas as pd\n\nfrom zipline.gens.utils import hash_args\n\nfrom zipline.sources.data_source import DataSource\n\n\nclass DataFrameSource(DataSource):\n \"\"\"\n Yields all events in event_list that match the given sid_filter.\n If no event_list is specified, generates an internal stream of events\n to filter. Returns all events if filter is None.\n\n Configuration options:\n\n sids : list of values representing simulated internal sids\n start : start date\n delta : timedelta between internal events\n filter : filter to remove the sids\n \"\"\"\n\n def __init__(self, data, **kwargs):\n assert isinstance(data.index, pd.tseries.index.DatetimeIndex)\n\n self.data = data\n # Unpack config dictionary with default values.\n self.sids = kwargs.get('sids', data.columns)\n self.start = kwargs.get('start', data.index[0])\n self.end = kwargs.get('end', data.index[-1])\n\n # Hash_value for downstream sorting.\n self.arg_string = hash_args(data, **kwargs)\n\n self._raw_data = None\n\n @property\n def mapping(self):\n return {\n 'dt': (lambda x: x, 'dt'),\n 'sid': (lambda x: x, 'sid'),\n 'price': (float, 'price'),\n 'volume': (int, 'volume'),\n }\n\n @property\n def instance_hash(self):\n return self.arg_string\n\n def raw_data_gen(self):\n for dt, series in self.data.iterrows():\n for sid, price in series.iteritems():\n if sid in self.sids:\n event = {\n 'dt': dt,\n 'sid': sid,\n 'price': price,\n # Just chose something large\n # if no volume available.\n 'volume': 1e9,\n }\n yield event\n\n @property\n def raw_data(self):\n if not self._raw_data:\n self._raw_data = self.raw_data_gen()\n return self._raw_data\n\n\nclass DataPanelSource(DataSource):\n \"\"\"\n Yields all events in event_list that match the given sid_filter.\n If no event_list is specified, generates an internal stream of events\n to filter. 
Returns all events if filter is None.\n\n Configuration options:\n\n sids : list of values representing simulated internal sids\n start : start date\n delta : timedelta between internal events\n filter : filter to remove the sids\n \"\"\"\n\n def __init__(self, data, **kwargs):\n assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)\n\n self.data = data\n # Unpack config dictionary with default values.\n self.sids = kwargs.get('sids', data.items)\n self.start = kwargs.get('start', data.major_axis[0])\n self.end = kwargs.get('end', data.major_axis[-1])\n\n # Hash_value for downstream sorting.\n self.arg_string = hash_args(data, **kwargs)\n\n self._raw_data = None\n\n @property\n def mapping(self):\n mapping = {\n 'dt': (lambda x: x, 'dt'),\n 'sid': (lambda x: x, 'sid'),\n 'price': (float, 'price'),\n 'volume': (int, 'volume'),\n }\n\n # Add additional fields.\n for field_name in self.data.minor_axis:\n if field_name in ['price', 'volume', 'dt', 'sid']:\n continue\n mapping[field_name] = (lambda x: x, field_name)\n\n return mapping\n\n @property\n def instance_hash(self):\n return self.arg_string\n\n def raw_data_gen(self):\n for dt in self.data.major_axis:\n df = self.data.major_xs(dt)\n for sid, series in df.iteritems():\n if sid in self.sids:\n event = {\n 'dt': dt,\n 'sid': sid,\n }\n for field_name, value in series.iteritems():\n event[field_name] = value\n\n yield event\n\n @property\n def raw_data(self):\n if not self._raw_data:\n self._raw_data = self.raw_data_gen()\n return self._raw_data\n", "path": "zipline/sources/data_frame_source.py"}]}
2,037
832
gh_patches_debug_7744
rasdani/github-patches
git_diff
mkdocs__mkdocs-289
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `mkdocs serve` doesn't recover from build errors For example, add a new page to the `pages` setting before you create the file, you then need to manually restart the serve command. </issue> <code> [start of mkdocs/serve.py] 1 # coding: utf-8 2 from __future__ import print_function 3 4 from watchdog import events 5 from watchdog.observers.polling import PollingObserver 6 from mkdocs.build import build 7 from mkdocs.compat import httpserver, socketserver, urlunquote 8 from mkdocs.config import load_config 9 import os 10 import posixpath 11 import shutil 12 import sys 13 import tempfile 14 15 16 class BuildEventHandler(events.FileSystemEventHandler): 17 """ 18 Perform a rebuild when anything in the theme or docs directory changes. 19 """ 20 def __init__(self, options): 21 super(BuildEventHandler, self).__init__() 22 self.options = options 23 24 def on_any_event(self, event): 25 if not isinstance(event, events.DirModifiedEvent): 26 print('Rebuilding documentation...', end='') 27 config = load_config(options=self.options) 28 build(config, live_server=True) 29 print(' done') 30 31 32 class ConfigEventHandler(BuildEventHandler): 33 """ 34 Perform a rebuild when the config file changes. 35 """ 36 def on_any_event(self, event): 37 if os.path.basename(event.src_path) == 'mkdocs.yml': 38 super(ConfigEventHandler, self).on_any_event(event) 39 40 41 class FixedDirectoryHandler(httpserver.SimpleHTTPRequestHandler): 42 """ 43 Override the default implementation to allow us to specify the served 44 directory, instead of being hardwired to the current working directory. 45 """ 46 base_dir = os.getcwd() 47 48 def translate_path(self, path): 49 # abandon query parameters 50 path = path.split('?', 1)[0] 51 path = path.split('#', 1)[0] 52 path = posixpath.normpath(urlunquote(path)) 53 words = path.split('/') 54 words = filter(None, words) 55 path = self.base_dir 56 for word in words: 57 drive, word = os.path.splitdrive(word) 58 head, word = os.path.split(word) 59 if word in (os.curdir, os.pardir): 60 continue 61 path = os.path.join(path, word) 62 return path 63 64 def log_message(self, format, *args): 65 date_str = self.log_date_time_string() 66 sys.stderr.write('[%s] %s\n' % (date_str, format % args)) 67 68 69 def serve(config, options=None): 70 """ 71 Start the devserver, and rebuild the docs whenever any changes take effect. 72 """ 73 # Create a temporary build directory, and set some options to serve it 74 tempdir = tempfile.mkdtemp() 75 options['site_dir'] = tempdir 76 77 # Only use user-friendly URLs when running the live server 78 options['use_directory_urls'] = True 79 80 # Perform the initial build 81 config = load_config(options=options) 82 build(config, live_server=True) 83 84 # Note: We pass any command-line options through so that we 85 # can re-apply them if the config file is reloaded. 86 event_handler = BuildEventHandler(options) 87 config_event_handler = ConfigEventHandler(options) 88 89 # We could have used `Observer()`, which can be faster, but 90 # `PollingObserver()` works more universally. 
91 observer = PollingObserver() 92 observer.schedule(event_handler, config['docs_dir'], recursive=True) 93 for theme_dir in config['theme_dir']: 94 observer.schedule(event_handler, theme_dir, recursive=True) 95 observer.schedule(config_event_handler, '.') 96 observer.start() 97 98 class TCPServer(socketserver.TCPServer): 99 allow_reuse_address = True 100 101 class DocsDirectoryHandler(FixedDirectoryHandler): 102 base_dir = config['site_dir'] 103 104 host, port = config['dev_addr'].split(':', 1) 105 server = TCPServer((host, int(port)), DocsDirectoryHandler) 106 107 print('Running at: http://%s:%s/' % (host, port)) 108 print('Live reload enabled.') 109 print('Hold ctrl+c to quit.') 110 try: 111 server.serve_forever() 112 except KeyboardInterrupt: 113 print('Stopping server...') 114 115 # Clean up 116 observer.stop() 117 observer.join() 118 shutil.rmtree(tempdir) 119 print('Quit complete') 120 [end of mkdocs/serve.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkdocs/serve.py b/mkdocs/serve.py --- a/mkdocs/serve.py +++ b/mkdocs/serve.py @@ -34,8 +34,11 @@ Perform a rebuild when the config file changes. """ def on_any_event(self, event): - if os.path.basename(event.src_path) == 'mkdocs.yml': - super(ConfigEventHandler, self).on_any_event(event) + try: + if os.path.basename(event.src_path) == 'mkdocs.yml': + super(ConfigEventHandler, self).on_any_event(event) + except Exception as e: + print(e) class FixedDirectoryHandler(httpserver.SimpleHTTPRequestHandler):
{"golden_diff": "diff --git a/mkdocs/serve.py b/mkdocs/serve.py\n--- a/mkdocs/serve.py\n+++ b/mkdocs/serve.py\n@@ -34,8 +34,11 @@\n Perform a rebuild when the config file changes.\n \"\"\"\n def on_any_event(self, event):\n- if os.path.basename(event.src_path) == 'mkdocs.yml':\n- super(ConfigEventHandler, self).on_any_event(event)\n+ try:\n+ if os.path.basename(event.src_path) == 'mkdocs.yml':\n+ super(ConfigEventHandler, self).on_any_event(event)\n+ except Exception as e:\n+ print(e)\n \n \n class FixedDirectoryHandler(httpserver.SimpleHTTPRequestHandler):\n", "issue": "`mkdocs serve` doesn't recover from build errors\nFor example, add a new page to the `pages` setting before you create the file, you then need to manually restart the serve command.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nfrom watchdog import events\nfrom watchdog.observers.polling import PollingObserver\nfrom mkdocs.build import build\nfrom mkdocs.compat import httpserver, socketserver, urlunquote\nfrom mkdocs.config import load_config\nimport os\nimport posixpath\nimport shutil\nimport sys\nimport tempfile\n\n\nclass BuildEventHandler(events.FileSystemEventHandler):\n \"\"\"\n Perform a rebuild when anything in the theme or docs directory changes.\n \"\"\"\n def __init__(self, options):\n super(BuildEventHandler, self).__init__()\n self.options = options\n\n def on_any_event(self, event):\n if not isinstance(event, events.DirModifiedEvent):\n print('Rebuilding documentation...', end='')\n config = load_config(options=self.options)\n build(config, live_server=True)\n print(' done')\n\n\nclass ConfigEventHandler(BuildEventHandler):\n \"\"\"\n Perform a rebuild when the config file changes.\n \"\"\"\n def on_any_event(self, event):\n if os.path.basename(event.src_path) == 'mkdocs.yml':\n super(ConfigEventHandler, self).on_any_event(event)\n\n\nclass FixedDirectoryHandler(httpserver.SimpleHTTPRequestHandler):\n \"\"\"\n Override the default implementation to allow us to specify the served\n directory, instead of being hardwired to the current working directory.\n \"\"\"\n base_dir = os.getcwd()\n\n def translate_path(self, path):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n path = posixpath.normpath(urlunquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = self.base_dir\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word in (os.curdir, os.pardir):\n continue\n path = os.path.join(path, word)\n return path\n\n def log_message(self, format, *args):\n date_str = self.log_date_time_string()\n sys.stderr.write('[%s] %s\\n' % (date_str, format % args))\n\n\ndef serve(config, options=None):\n \"\"\"\n Start the devserver, and rebuild the docs whenever any changes take effect.\n \"\"\"\n # Create a temporary build directory, and set some options to serve it\n tempdir = tempfile.mkdtemp()\n options['site_dir'] = tempdir\n\n # Only use user-friendly URLs when running the live server\n options['use_directory_urls'] = True\n\n # Perform the initial build\n config = load_config(options=options)\n build(config, live_server=True)\n\n # Note: We pass any command-line options through so that we\n # can re-apply them if the config file is reloaded.\n event_handler = BuildEventHandler(options)\n config_event_handler = ConfigEventHandler(options)\n\n # We could have used `Observer()`, which can be faster, but\n # `PollingObserver()` works more universally.\n 
observer = PollingObserver()\n observer.schedule(event_handler, config['docs_dir'], recursive=True)\n for theme_dir in config['theme_dir']:\n observer.schedule(event_handler, theme_dir, recursive=True)\n observer.schedule(config_event_handler, '.')\n observer.start()\n\n class TCPServer(socketserver.TCPServer):\n allow_reuse_address = True\n\n class DocsDirectoryHandler(FixedDirectoryHandler):\n base_dir = config['site_dir']\n\n host, port = config['dev_addr'].split(':', 1)\n server = TCPServer((host, int(port)), DocsDirectoryHandler)\n\n print('Running at: http://%s:%s/' % (host, port))\n print('Live reload enabled.')\n print('Hold ctrl+c to quit.')\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n print('Stopping server...')\n\n # Clean up\n observer.stop()\n observer.join()\n shutil.rmtree(tempdir)\n print('Quit complete')\n", "path": "mkdocs/serve.py"}]}
1,692
156
gh_patches_debug_818
rasdani/github-patches
git_diff
doccano__doccano-363
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> New user signup page question Hi i'm trying to understand the user structure. I see a few posts about only being able to assign users to specific projects through the django admin screen, but my question is about the 'sign up' page you get offered when you click login, is this totally non functional? That is, is the *only* way to make new users of any kind through the django admin page? Thanks, Z </issue> <code> [start of app/authentification/views.py] 1 from django.shortcuts import render 2 from .forms import SignupForm 3 from django.contrib.sites.shortcuts import get_current_site 4 from django.utils.encoding import force_bytes 5 from django.utils.http import urlsafe_base64_encode 6 from django.template.loader import render_to_string 7 from .tokens import account_activation_token 8 from django.core.mail import EmailMessage 9 from django.views.generic import TemplateView 10 from django.shortcuts import redirect 11 12 from app import settings 13 14 15 class SignupView(TemplateView): 16 template_name = 'signup.html' 17 form_class = SignupForm 18 19 def get(self, request, *args, **kwargs): 20 form = self.form_class() 21 return render(request, self.template_name, {'form': form, 'allow_signup': bool(settings.ALLOW_SIGNUP)}) 22 23 def post(self, request, *args, **kwargs): 24 form = self.form_class(request.POST) 25 26 # here we make sure that a post request won't trigger a subscription in case allow_signup is False 27 if not bool(settings.ALLOW_SIGNUP): 28 return redirect('signup') 29 30 if not hasattr(settings, "EMAIL_BACKEND") and not hasattr(settings, "EMAIL_HOST"): 31 return render(request, 'email_not_set.html') 32 33 if form.is_valid(): 34 user = form.save(commit=False) 35 user.is_active = False 36 user.save() 37 current_site = get_current_site(request) 38 mail_subject = 'Activate your account.' 39 message = render_to_string('acc_active_email.html', { 40 'user': user, 41 'domain': current_site.domain, 42 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(), 43 'token': account_activation_token.make_token(user), 44 }) 45 to_email = form.cleaned_data.get('email') 46 email = EmailMessage( 47 mail_subject, message, to=[to_email] 48 ) 49 email.send() 50 return render(request, 'validate_mail_address_complete.html') 51 else: 52 return render(request, self.template_name, {'form': form, 'allow_signup': bool(settings.ALLOW_SIGNUP)}) 53 [end of app/authentification/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/authentification/views.py b/app/authentification/views.py --- a/app/authentification/views.py +++ b/app/authentification/views.py @@ -9,7 +9,7 @@ from django.views.generic import TemplateView from django.shortcuts import redirect -from app import settings +from django.conf import settings class SignupView(TemplateView):
{"golden_diff": "diff --git a/app/authentification/views.py b/app/authentification/views.py\n--- a/app/authentification/views.py\n+++ b/app/authentification/views.py\n@@ -9,7 +9,7 @@\n from django.views.generic import TemplateView\n from django.shortcuts import redirect\n \n-from app import settings\n+from django.conf import settings\n \n \n class SignupView(TemplateView):\n", "issue": "New user signup page question\nHi i'm trying to understand the user structure. I see a few posts about only being able to assign users to specific projects through the django admin screen, but my question is about the 'sign up' page you get offered when you click login, is this totally non functional? That is, is the *only* way to make new users of any kind through the django admin page?\r\n\r\nThanks,\r\nZ\n", "before_files": [{"content": "from django.shortcuts import render\nfrom .forms import SignupForm\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.template.loader import render_to_string\nfrom .tokens import account_activation_token\nfrom django.core.mail import EmailMessage\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import redirect\n\nfrom app import settings\n\n\nclass SignupView(TemplateView):\n template_name = 'signup.html'\n form_class = SignupForm\n\n def get(self, request, *args, **kwargs):\n form = self.form_class()\n return render(request, self.template_name, {'form': form, 'allow_signup': bool(settings.ALLOW_SIGNUP)})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n # here we make sure that a post request won't trigger a subscription in case allow_signup is False\n if not bool(settings.ALLOW_SIGNUP):\n return redirect('signup')\n\n if not hasattr(settings, \"EMAIL_BACKEND\") and not hasattr(settings, \"EMAIL_HOST\"):\n return render(request, 'email_not_set.html')\n\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n mail_subject = 'Activate your account.'\n message = render_to_string('acc_active_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),\n 'token': account_activation_token.make_token(user),\n })\n to_email = form.cleaned_data.get('email')\n email = EmailMessage(\n mail_subject, message, to=[to_email]\n )\n email.send()\n return render(request, 'validate_mail_address_complete.html')\n else:\n return render(request, self.template_name, {'form': form, 'allow_signup': bool(settings.ALLOW_SIGNUP)})\n", "path": "app/authentification/views.py"}]}
1,151
81
gh_patches_debug_16657
rasdani/github-patches
git_diff
deepchecks__deepchecks-575
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [FEAT] Visualization of Regression Systematic Error box plot unclear **Is your feature request related to a problem? Please describe.** Visualization of Regression Systematic Error box plot unclear ![Screen Shot 2022-01-06 at 16 14 53](https://user-images.githubusercontent.com/26321553/148396690-7115e4e1-c81b-4458-81c7-f0d074db6990.png) **Describe the solution you'd like** Labels to be shorter and not sideways. Also labels are unclear. </issue> <code> [start of deepchecks/checks/performance/regression_systematic_error.py] 1 # ---------------------------------------------------------------------------- 2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com) 3 # 4 # This file is part of Deepchecks. 5 # Deepchecks is distributed under the terms of the GNU Affero General 6 # Public License (version 3 or later). 7 # You should have received a copy of the GNU Affero General Public License 8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>. 9 # ---------------------------------------------------------------------------- 10 # 11 """The RegressionSystematicError check module.""" 12 import plotly.graph_objects as go 13 from sklearn.base import BaseEstimator 14 from sklearn.metrics import mean_squared_error 15 16 from deepchecks import CheckResult, Dataset, SingleDatasetBaseCheck, ConditionResult 17 from deepchecks.utils.metrics import ModelType, task_type_validation 18 from deepchecks.utils.strings import format_number 19 20 21 __all__ = ['RegressionSystematicError'] 22 23 24 class RegressionSystematicError(SingleDatasetBaseCheck): 25 """Check the regression systematic error.""" 26 27 def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult: 28 """Run check. 29 30 Arguments: 31 dataset (Dataset): A dataset object. 32 model (BaseEstimator): A scikit-learn-compatible fitted estimator instance 33 Returns: 34 CheckResult: 35 - value is a dict with rmse and mean prediction error. 36 - display is box plot of the prediction errorד. 37 Raises: 38 DeepchecksValueError: If the object is not a Dataset instance with a label 39 """ 40 return self._regression_error_distribution(dataset, model) 41 42 def _regression_error_distribution(self, dataset: Dataset, model: BaseEstimator): 43 Dataset.validate_dataset(dataset) 44 dataset.validate_label() 45 task_type_validation(model, dataset, [ModelType.REGRESSION]) 46 47 y_test = dataset.label_col 48 y_pred = model.predict(dataset.features_columns) 49 50 rmse = mean_squared_error(dataset.label_col, y_pred, squared=False) 51 diff = y_test - y_pred 52 diff_mean = diff.mean() 53 54 fig = go.Figure() 55 fig.add_trace(go.Box( 56 x=diff, 57 name='Model prediction error', 58 boxmean=True # represent mean 59 )).update_layout( 60 title_text='Box plot of the model prediction error', 61 width=800, height=500) 62 63 display = [ 64 'Non-zero mean of the error distribution indicated the presents ' 65 'of systematic error in model predictions', 66 fig 67 ] 68 69 return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display) 70 71 def add_condition_systematic_error_ratio_to_rmse_not_greater_than(self, max_ratio: float = 0.01): 72 """Add condition - require the absolute mean systematic error to be not greater than (max_ratio * RMSE). 
73 74 Args: 75 max_ratio (float): Maximum ratio 76 """ 77 def max_bias_condition(result: float) -> ConditionResult: 78 rmse = result['rmse'] 79 mean_error = result['mean_error'] 80 if abs(mean_error) > max_ratio * rmse: 81 return ConditionResult(False, 82 f'mean error: {format_number(mean_error, 5)}, RMSE: {format_number(rmse)}') 83 else: 84 return ConditionResult(True) 85 86 return self.add_condition(f'Bias ratio is not greater than {format_number(max_ratio)}', 87 max_bias_condition) 88 [end of deepchecks/checks/performance/regression_systematic_error.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/deepchecks/checks/performance/regression_systematic_error.py b/deepchecks/checks/performance/regression_systematic_error.py --- a/deepchecks/checks/performance/regression_systematic_error.py +++ b/deepchecks/checks/performance/regression_systematic_error.py @@ -51,14 +51,19 @@ diff = y_test - y_pred diff_mean = diff.mean() - fig = go.Figure() - fig.add_trace(go.Box( - x=diff, - name='Model prediction error', - boxmean=True # represent mean - )).update_layout( + fig = ( + go.Figure() + .add_trace(go.Box( + x=diff, + orientation='h', + name='Model prediction error', + hoverinfo='x', + boxmean=True)) + .update_layout( title_text='Box plot of the model prediction error', - width=800, height=500) + width=800, + height=500) + ) display = [ 'Non-zero mean of the error distribution indicated the presents '
{"golden_diff": "diff --git a/deepchecks/checks/performance/regression_systematic_error.py b/deepchecks/checks/performance/regression_systematic_error.py\n--- a/deepchecks/checks/performance/regression_systematic_error.py\n+++ b/deepchecks/checks/performance/regression_systematic_error.py\n@@ -51,14 +51,19 @@\n diff = y_test - y_pred\n diff_mean = diff.mean()\n \n- fig = go.Figure()\n- fig.add_trace(go.Box(\n- x=diff,\n- name='Model prediction error',\n- boxmean=True # represent mean\n- )).update_layout(\n+ fig = (\n+ go.Figure()\n+ .add_trace(go.Box(\n+ x=diff,\n+ orientation='h',\n+ name='Model prediction error',\n+ hoverinfo='x',\n+ boxmean=True))\n+ .update_layout(\n title_text='Box plot of the model prediction error',\n- width=800, height=500)\n+ width=800,\n+ height=500)\n+ )\n \n display = [\n 'Non-zero mean of the error distribution indicated the presents '\n", "issue": "[FEAT] Visualization of Regression Systematic Error box plot unclear\n**Is your feature request related to a problem? Please describe.**\r\nVisualization of Regression Systematic Error box plot unclear\r\n\r\n![Screen Shot 2022-01-06 at 16 14 53](https://user-images.githubusercontent.com/26321553/148396690-7115e4e1-c81b-4458-81c7-f0d074db6990.png)\r\n\r\n**Describe the solution you'd like**\r\nLabels to be shorter and not sideways. Also labels are unclear.\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The RegressionSystematicError check module.\"\"\"\nimport plotly.graph_objects as go\nfrom sklearn.base import BaseEstimator\nfrom sklearn.metrics import mean_squared_error\n\nfrom deepchecks import CheckResult, Dataset, SingleDatasetBaseCheck, ConditionResult\nfrom deepchecks.utils.metrics import ModelType, task_type_validation\nfrom deepchecks.utils.strings import format_number\n\n\n__all__ = ['RegressionSystematicError']\n\n\nclass RegressionSystematicError(SingleDatasetBaseCheck):\n \"\"\"Check the regression systematic error.\"\"\"\n\n def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Arguments:\n dataset (Dataset): A dataset object.\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n Returns:\n CheckResult:\n - value is a dict with rmse and mean prediction error.\n - display is box plot of the prediction error\u05d3.\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._regression_error_distribution(dataset, model)\n\n def _regression_error_distribution(self, dataset: Dataset, model: BaseEstimator):\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n task_type_validation(model, dataset, [ModelType.REGRESSION])\n\n y_test = dataset.label_col\n y_pred = model.predict(dataset.features_columns)\n\n rmse = mean_squared_error(dataset.label_col, y_pred, squared=False)\n diff = y_test - y_pred\n diff_mean = diff.mean()\n\n fig = go.Figure()\n fig.add_trace(go.Box(\n x=diff,\n name='Model prediction error',\n boxmean=True # represent mean\n )).update_layout(\n title_text='Box plot of the model prediction error',\n width=800, height=500)\n\n display = [\n 'Non-zero mean of the error distribution indicated the presents '\n 'of systematic error in model predictions',\n fig\n ]\n\n return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)\n\n def add_condition_systematic_error_ratio_to_rmse_not_greater_than(self, max_ratio: float = 0.01):\n \"\"\"Add condition - require the absolute mean systematic error to be not greater than (max_ratio * RMSE).\n\n Args:\n max_ratio (float): Maximum ratio\n \"\"\"\n def max_bias_condition(result: float) -> ConditionResult:\n rmse = result['rmse']\n mean_error = result['mean_error']\n if abs(mean_error) > max_ratio * rmse:\n return ConditionResult(False,\n f'mean error: {format_number(mean_error, 5)}, RMSE: {format_number(rmse)}')\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Bias ratio is not greater than {format_number(max_ratio)}',\n max_bias_condition)\n", "path": "deepchecks/checks/performance/regression_systematic_error.py"}]}
1,583
264
gh_patches_debug_22661
rasdani/github-patches
git_diff
freqtrade__freqtrade-1955
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Hyperopt Resamples Python Version: 3.6.7 CCXT version: 1.18.507 Branch: Develop Last Commit ID: 4ae743e ## Step 3: Describe the problem: self.ticker_interval is not implemented in the hyperopt interface, so you cannot resample data to higher timeframes. </issue> <code> [start of freqtrade/optimize/hyperopt_interface.py] 1 """ 2 IHyperOpt interface 3 This module defines the interface to apply for hyperopts 4 """ 5 6 from abc import ABC, abstractmethod 7 from typing import Dict, Any, Callable, List 8 9 from pandas import DataFrame 10 from skopt.space import Dimension 11 12 13 class IHyperOpt(ABC): 14 """ 15 Interface for freqtrade hyperopts 16 Defines the mandatory structure must follow any custom strategies 17 18 Attributes you can use: 19 minimal_roi -> Dict: Minimal ROI designed for the strategy 20 stoploss -> float: optimal stoploss designed for the strategy 21 ticker_interval -> int: value of the ticker interval to use for the strategy 22 """ 23 24 @staticmethod 25 @abstractmethod 26 def populate_indicators(dataframe: DataFrame, metadata: dict) -> DataFrame: 27 """ 28 Populate indicators that will be used in the Buy and Sell strategy 29 :param dataframe: Raw data from the exchange and parsed by parse_ticker_dataframe() 30 :return: a Dataframe with all mandatory indicators for the strategies 31 """ 32 33 @staticmethod 34 @abstractmethod 35 def buy_strategy_generator(params: Dict[str, Any]) -> Callable: 36 """ 37 Create a buy strategy generator 38 """ 39 40 @staticmethod 41 @abstractmethod 42 def sell_strategy_generator(params: Dict[str, Any]) -> Callable: 43 """ 44 Create a sell strategy generator 45 """ 46 47 @staticmethod 48 @abstractmethod 49 def indicator_space() -> List[Dimension]: 50 """ 51 Create an indicator space 52 """ 53 54 @staticmethod 55 @abstractmethod 56 def sell_indicator_space() -> List[Dimension]: 57 """ 58 Create a sell indicator space 59 """ 60 61 @staticmethod 62 @abstractmethod 63 def generate_roi_table(params: Dict) -> Dict[int, float]: 64 """ 65 Create an roi table 66 """ 67 68 @staticmethod 69 @abstractmethod 70 def stoploss_space() -> List[Dimension]: 71 """ 72 Create a stoploss space 73 """ 74 75 @staticmethod 76 @abstractmethod 77 def roi_space() -> List[Dimension]: 78 """ 79 Create a roi space 80 """ 81 [end of freqtrade/optimize/hyperopt_interface.py] [start of freqtrade/resolvers/hyperopt_resolver.py] 1 # pragma pylint: disable=attribute-defined-outside-init 2 3 """ 4 This module load custom hyperopts 5 """ 6 import logging 7 from pathlib import Path 8 from typing import Optional, Dict 9 10 from freqtrade.constants import DEFAULT_HYPEROPT 11 from freqtrade.optimize.hyperopt_interface import IHyperOpt 12 from freqtrade.resolvers import IResolver 13 14 logger = logging.getLogger(__name__) 15 16 17 class HyperOptResolver(IResolver): 18 """ 19 This class contains all the logic to load custom hyperopt class 20 """ 21 22 __slots__ = ['hyperopt'] 23 24 def __init__(self, config: Optional[Dict] = None) -> None: 25 """ 26 Load the custom class from config parameter 27 :param config: configuration dictionary or None 28 """ 29 config = config or {} 30 31 # Verify the hyperopt is in the configuration, otherwise fallback to the default hyperopt 32 hyperopt_name = config.get('hyperopt') or DEFAULT_HYPEROPT 33 self.hyperopt = self._load_hyperopt(hyperopt_name, extra_dir=config.get('hyperopt_path')) 34 35 if not hasattr(self.hyperopt, 'populate_buy_trend'): 36 
logger.warning("Custom Hyperopt does not provide populate_buy_trend. " 37 "Using populate_buy_trend from DefaultStrategy.") 38 if not hasattr(self.hyperopt, 'populate_sell_trend'): 39 logger.warning("Custom Hyperopt does not provide populate_sell_trend. " 40 "Using populate_sell_trend from DefaultStrategy.") 41 42 def _load_hyperopt( 43 self, hyperopt_name: str, extra_dir: Optional[str] = None) -> IHyperOpt: 44 """ 45 Search and loads the specified hyperopt. 46 :param hyperopt_name: name of the module to import 47 :param extra_dir: additional directory to search for the given hyperopt 48 :return: HyperOpt instance or None 49 """ 50 current_path = Path(__file__).parent.parent.joinpath('optimize').resolve() 51 52 abs_paths = [ 53 current_path.parent.parent.joinpath('user_data/hyperopts'), 54 current_path, 55 ] 56 57 if extra_dir: 58 # Add extra hyperopt directory on top of search paths 59 abs_paths.insert(0, Path(extra_dir)) 60 61 for _path in abs_paths: 62 try: 63 hyperopt = self._search_object(directory=_path, object_type=IHyperOpt, 64 object_name=hyperopt_name) 65 if hyperopt: 66 logger.info("Using resolved hyperopt %s from '%s'", hyperopt_name, _path) 67 return hyperopt 68 except FileNotFoundError: 69 logger.warning('Path "%s" does not exist', _path.relative_to(Path.cwd())) 70 71 raise ImportError( 72 "Impossible to load Hyperopt '{}'. This class does not exist" 73 " or contains Python code errors".format(hyperopt_name) 74 ) 75 [end of freqtrade/resolvers/hyperopt_resolver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/freqtrade/optimize/hyperopt_interface.py b/freqtrade/optimize/hyperopt_interface.py --- a/freqtrade/optimize/hyperopt_interface.py +++ b/freqtrade/optimize/hyperopt_interface.py @@ -20,6 +20,7 @@ stoploss -> float: optimal stoploss designed for the strategy ticker_interval -> int: value of the ticker interval to use for the strategy """ + ticker_interval: str @staticmethod @abstractmethod diff --git a/freqtrade/resolvers/hyperopt_resolver.py b/freqtrade/resolvers/hyperopt_resolver.py --- a/freqtrade/resolvers/hyperopt_resolver.py +++ b/freqtrade/resolvers/hyperopt_resolver.py @@ -32,6 +32,9 @@ hyperopt_name = config.get('hyperopt') or DEFAULT_HYPEROPT self.hyperopt = self._load_hyperopt(hyperopt_name, extra_dir=config.get('hyperopt_path')) + # Assign ticker_interval to be used in hyperopt + self.hyperopt.__class__.ticker_interval = str(config['ticker_interval']) + if not hasattr(self.hyperopt, 'populate_buy_trend'): logger.warning("Custom Hyperopt does not provide populate_buy_trend. " "Using populate_buy_trend from DefaultStrategy.")
{"golden_diff": "diff --git a/freqtrade/optimize/hyperopt_interface.py b/freqtrade/optimize/hyperopt_interface.py\n--- a/freqtrade/optimize/hyperopt_interface.py\n+++ b/freqtrade/optimize/hyperopt_interface.py\n@@ -20,6 +20,7 @@\n stoploss -> float: optimal stoploss designed for the strategy\n ticker_interval -> int: value of the ticker interval to use for the strategy\n \"\"\"\n+ ticker_interval: str\n \n @staticmethod\n @abstractmethod\ndiff --git a/freqtrade/resolvers/hyperopt_resolver.py b/freqtrade/resolvers/hyperopt_resolver.py\n--- a/freqtrade/resolvers/hyperopt_resolver.py\n+++ b/freqtrade/resolvers/hyperopt_resolver.py\n@@ -32,6 +32,9 @@\n hyperopt_name = config.get('hyperopt') or DEFAULT_HYPEROPT\n self.hyperopt = self._load_hyperopt(hyperopt_name, extra_dir=config.get('hyperopt_path'))\n \n+ # Assign ticker_interval to be used in hyperopt\n+ self.hyperopt.__class__.ticker_interval = str(config['ticker_interval'])\n+\n if not hasattr(self.hyperopt, 'populate_buy_trend'):\n logger.warning(\"Custom Hyperopt does not provide populate_buy_trend. \"\n \"Using populate_buy_trend from DefaultStrategy.\")\n", "issue": "Hyperopt Resamples\n Python Version: 3.6.7\r\n CCXT version: 1.18.507\r\n Branch: Develop\r\n Last Commit ID: 4ae743e\r\n\r\n \r\n## Step 3: Describe the problem:\r\n\r\nself.ticker_interval is not implemented in the hyperopt interface, so you cannot resample data to higher timeframes.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nIHyperOpt interface\nThis module defines the interface to apply for hyperopts\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Dict, Any, Callable, List\n\nfrom pandas import DataFrame\nfrom skopt.space import Dimension\n\n\nclass IHyperOpt(ABC):\n \"\"\"\n Interface for freqtrade hyperopts\n Defines the mandatory structure must follow any custom strategies\n\n Attributes you can use:\n minimal_roi -> Dict: Minimal ROI designed for the strategy\n stoploss -> float: optimal stoploss designed for the strategy\n ticker_interval -> int: value of the ticker interval to use for the strategy\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def populate_indicators(dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Populate indicators that will be used in the Buy and Sell strategy\n :param dataframe: Raw data from the exchange and parsed by parse_ticker_dataframe()\n :return: a Dataframe with all mandatory indicators for the strategies\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def buy_strategy_generator(params: Dict[str, Any]) -> Callable:\n \"\"\"\n Create a buy strategy generator\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def sell_strategy_generator(params: Dict[str, Any]) -> Callable:\n \"\"\"\n Create a sell strategy generator\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def indicator_space() -> List[Dimension]:\n \"\"\"\n Create an indicator space\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def sell_indicator_space() -> List[Dimension]:\n \"\"\"\n Create a sell indicator space\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def generate_roi_table(params: Dict) -> Dict[int, float]:\n \"\"\"\n Create an roi table\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def stoploss_space() -> List[Dimension]:\n \"\"\"\n Create a stoploss space\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def roi_space() -> List[Dimension]:\n \"\"\"\n Create a roi space\n \"\"\"\n", "path": "freqtrade/optimize/hyperopt_interface.py"}, {"content": "# pragma pylint: disable=attribute-defined-outside-init\n\n\"\"\"\nThis module load 
custom hyperopts\n\"\"\"\nimport logging\nfrom pathlib import Path\nfrom typing import Optional, Dict\n\nfrom freqtrade.constants import DEFAULT_HYPEROPT\nfrom freqtrade.optimize.hyperopt_interface import IHyperOpt\nfrom freqtrade.resolvers import IResolver\n\nlogger = logging.getLogger(__name__)\n\n\nclass HyperOptResolver(IResolver):\n \"\"\"\n This class contains all the logic to load custom hyperopt class\n \"\"\"\n\n __slots__ = ['hyperopt']\n\n def __init__(self, config: Optional[Dict] = None) -> None:\n \"\"\"\n Load the custom class from config parameter\n :param config: configuration dictionary or None\n \"\"\"\n config = config or {}\n\n # Verify the hyperopt is in the configuration, otherwise fallback to the default hyperopt\n hyperopt_name = config.get('hyperopt') or DEFAULT_HYPEROPT\n self.hyperopt = self._load_hyperopt(hyperopt_name, extra_dir=config.get('hyperopt_path'))\n\n if not hasattr(self.hyperopt, 'populate_buy_trend'):\n logger.warning(\"Custom Hyperopt does not provide populate_buy_trend. \"\n \"Using populate_buy_trend from DefaultStrategy.\")\n if not hasattr(self.hyperopt, 'populate_sell_trend'):\n logger.warning(\"Custom Hyperopt does not provide populate_sell_trend. \"\n \"Using populate_sell_trend from DefaultStrategy.\")\n\n def _load_hyperopt(\n self, hyperopt_name: str, extra_dir: Optional[str] = None) -> IHyperOpt:\n \"\"\"\n Search and loads the specified hyperopt.\n :param hyperopt_name: name of the module to import\n :param extra_dir: additional directory to search for the given hyperopt\n :return: HyperOpt instance or None\n \"\"\"\n current_path = Path(__file__).parent.parent.joinpath('optimize').resolve()\n\n abs_paths = [\n current_path.parent.parent.joinpath('user_data/hyperopts'),\n current_path,\n ]\n\n if extra_dir:\n # Add extra hyperopt directory on top of search paths\n abs_paths.insert(0, Path(extra_dir))\n\n for _path in abs_paths:\n try:\n hyperopt = self._search_object(directory=_path, object_type=IHyperOpt,\n object_name=hyperopt_name)\n if hyperopt:\n logger.info(\"Using resolved hyperopt %s from '%s'\", hyperopt_name, _path)\n return hyperopt\n except FileNotFoundError:\n logger.warning('Path \"%s\" does not exist', _path.relative_to(Path.cwd()))\n\n raise ImportError(\n \"Impossible to load Hyperopt '{}'. This class does not exist\"\n \" or contains Python code errors\".format(hyperopt_name)\n )\n", "path": "freqtrade/resolvers/hyperopt_resolver.py"}]}
1,982
295
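The hyperopt record above fixes the missing `ticker_interval` by declaring it as a class-level attribute on the interface and having the resolver copy the configured value onto the class. A minimal sketch of that pattern follows; the simplified `resolve_hyperopt` function and the `MyHyperOpt` class are illustrative stand-ins, not the real freqtrade code.

```python
from abc import ABC


class IHyperOpt(ABC):
    """Interface stub: ticker_interval is declared here but assigned by the resolver."""

    ticker_interval: str


class MyHyperOpt(IHyperOpt):
    def describe(self) -> str:
        # Instances can rely on the attribute once the resolver has run.
        return f"resampling against {self.ticker_interval}"


def resolve_hyperopt(config: dict) -> IHyperOpt:
    # Mirrors `self.hyperopt.__class__.ticker_interval = str(config['ticker_interval'])`
    # from the golden diff, just without the plugin-loading machinery.
    MyHyperOpt.ticker_interval = str(config["ticker_interval"])
    return MyHyperOpt()


print(resolve_hyperopt({"ticker_interval": "5m"}).describe())  # resampling against 5m
```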
gh_patches_debug_18144
rasdani/github-patches
git_diff
SeldonIO__MLServer-190
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Infer model's name from folder's name Currently, particularly in the case of MMS, MLServer requires models to specify their name in a `model-settings.json` file. This forces all models to ship that file alongside their model artifacts. It would be good to, instead, infer the model's name from the folder name (if not present in env). This would reduce friction on adopting MLServer, the V2 protocol and MMS. </issue> <code> [start of mlserver/repository.py] 1 import os 2 import glob 3 4 from typing import List 5 6 from .settings import ModelParameters, ModelSettings 7 from .errors import ModelNotFound 8 9 DEFAULT_MODEL_SETTINGS_FILENAME = "model-settings.json" 10 11 12 class ModelRepository: 13 """ 14 Model repository, responsible of the discovery of models which can be 15 loaded onto the model registry. 16 """ 17 18 def __init__(self, root: str = None): 19 self._root = root 20 21 async def list(self) -> List[ModelSettings]: 22 all_model_settings = [] 23 24 # TODO: Use an async alternative for filesys ops 25 if self._root: 26 pattern = os.path.join(self._root, "**", DEFAULT_MODEL_SETTINGS_FILENAME) 27 matches = glob.glob(pattern, recursive=True) 28 29 for model_settings_path in matches: 30 model_settings = self._load_model_settings(model_settings_path) 31 all_model_settings.append(model_settings) 32 33 # If there were no matches, try to load model from environment 34 if not all_model_settings: 35 # return default 36 model_settings = ModelSettings() 37 model_settings.parameters = ModelParameters() 38 all_model_settings.append(model_settings) 39 40 return all_model_settings 41 42 def _load_model_settings(self, model_settings_path: str) -> ModelSettings: 43 model_settings = ModelSettings.parse_file(model_settings_path) 44 45 if not model_settings.parameters: 46 model_settings.parameters = ModelParameters() 47 48 if not model_settings.parameters.uri: 49 # If not specified, default to its own folder 50 default_model_uri = os.path.dirname(model_settings_path) 51 model_settings.parameters.uri = default_model_uri 52 53 return model_settings 54 55 async def find(self, name: str) -> ModelSettings: 56 all_settings = await self.list() 57 for model_settings in all_settings: 58 if model_settings.name == name: 59 # TODO: Implement version policy 60 return model_settings 61 62 raise ModelNotFound(name) 63 [end of mlserver/repository.py] [start of mlserver/settings.py] 1 from typing import List, Optional 2 from pydantic import BaseSettings, PyObject 3 4 from .version import __version__ 5 from .types import MetadataTensor 6 7 ENV_PREFIX_SETTINGS = "MLSERVER_" 8 ENV_PREFIX_MODEL_SETTINGS = "MLSERVER_MODEL_" 9 10 11 class Settings(BaseSettings): 12 class Config: 13 env_prefix = ENV_PREFIX_SETTINGS 14 15 debug: bool = True 16 17 # Model repository folder 18 model_repository_root: str = "." 19 load_models_at_startup: bool = True 20 21 # Server metadata 22 server_name: str = "mlserver" 23 server_version: str = __version__ 24 extensions: List[str] = [] 25 26 # Server settings 27 host: str = "0.0.0.0" 28 http_port: int = 8080 29 grpc_port: int = 8081 30 grpc_workers: int = 10 31 32 33 class ModelParameters(BaseSettings): 34 """ 35 Parameters that apply only to a particular instance of a model. 36 This can include things like model weights. 37 The main difference with respect to ModelSettings is that parameters can 38 change on each instance (e.g. each version) of the model. 
39 """ 40 41 class Config: 42 env_prefix = ENV_PREFIX_MODEL_SETTINGS 43 44 uri: Optional[str] = None 45 version: Optional[str] = None 46 format: Optional[str] = None 47 48 49 class ModelSettings(BaseSettings): 50 class Config: 51 env_prefix = ENV_PREFIX_MODEL_SETTINGS 52 53 name: str 54 55 # Model metadata 56 platform: str = "" 57 versions: Optional[List[str]] = [] 58 inputs: Optional[List[MetadataTensor]] = [] 59 outputs: Optional[List[MetadataTensor]] = [] 60 61 # Custom model class implementation 62 implementation: PyObject = "mlserver.model.MLModel" # type: ignore 63 64 # Model parameters are meant to be set directly by the MLServer runtime. 65 # However, it's also possible to override them manually. 66 parameters: Optional[ModelParameters] = None 67 [end of mlserver/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlserver/repository.py b/mlserver/repository.py --- a/mlserver/repository.py +++ b/mlserver/repository.py @@ -42,6 +42,12 @@ def _load_model_settings(self, model_settings_path: str) -> ModelSettings: model_settings = ModelSettings.parse_file(model_settings_path) + # TODO: Raise warning if name is different than folder's name + if not model_settings.name: + # If name not present, default to folder name + default_model_name = os.path.basename(os.path.dirname(model_settings_path)) + model_settings.name = default_model_name + if not model_settings.parameters: model_settings.parameters = ModelParameters() diff --git a/mlserver/settings.py b/mlserver/settings.py --- a/mlserver/settings.py +++ b/mlserver/settings.py @@ -50,7 +50,7 @@ class Config: env_prefix = ENV_PREFIX_MODEL_SETTINGS - name: str + name: str = "" # Model metadata platform: str = ""
{"golden_diff": "diff --git a/mlserver/repository.py b/mlserver/repository.py\n--- a/mlserver/repository.py\n+++ b/mlserver/repository.py\n@@ -42,6 +42,12 @@\n def _load_model_settings(self, model_settings_path: str) -> ModelSettings:\n model_settings = ModelSettings.parse_file(model_settings_path)\n \n+ # TODO: Raise warning if name is different than folder's name\n+ if not model_settings.name:\n+ # If name not present, default to folder name\n+ default_model_name = os.path.basename(os.path.dirname(model_settings_path))\n+ model_settings.name = default_model_name\n+\n if not model_settings.parameters:\n model_settings.parameters = ModelParameters()\n \ndiff --git a/mlserver/settings.py b/mlserver/settings.py\n--- a/mlserver/settings.py\n+++ b/mlserver/settings.py\n@@ -50,7 +50,7 @@\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n \n- name: str\n+ name: str = \"\"\n \n # Model metadata\n platform: str = \"\"\n", "issue": "Infer model's name from folder's name\nCurrently, particularly in the case of MMS, MLServer requires models to specify their name in a `model-settings.json` file. This forces all models to ship that file alongside their model artifacts.\r\n\r\nIt would be good to, instead, infer the model's name from the folder name (if not present in env). This would reduce friction on adopting MLServer, the V2 protocol and MMS.\n", "before_files": [{"content": "import os\nimport glob\n\nfrom typing import List\n\nfrom .settings import ModelParameters, ModelSettings\nfrom .errors import ModelNotFound\n\nDEFAULT_MODEL_SETTINGS_FILENAME = \"model-settings.json\"\n\n\nclass ModelRepository:\n \"\"\"\n Model repository, responsible of the discovery of models which can be\n loaded onto the model registry.\n \"\"\"\n\n def __init__(self, root: str = None):\n self._root = root\n\n async def list(self) -> List[ModelSettings]:\n all_model_settings = []\n\n # TODO: Use an async alternative for filesys ops\n if self._root:\n pattern = os.path.join(self._root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n matches = glob.glob(pattern, recursive=True)\n\n for model_settings_path in matches:\n model_settings = self._load_model_settings(model_settings_path)\n all_model_settings.append(model_settings)\n\n # If there were no matches, try to load model from environment\n if not all_model_settings:\n # return default\n model_settings = ModelSettings()\n model_settings.parameters = ModelParameters()\n all_model_settings.append(model_settings)\n\n return all_model_settings\n\n def _load_model_settings(self, model_settings_path: str) -> ModelSettings:\n model_settings = ModelSettings.parse_file(model_settings_path)\n\n if not model_settings.parameters:\n model_settings.parameters = ModelParameters()\n\n if not model_settings.parameters.uri:\n # If not specified, default to its own folder\n default_model_uri = os.path.dirname(model_settings_path)\n model_settings.parameters.uri = default_model_uri\n\n return model_settings\n\n async def find(self, name: str) -> ModelSettings:\n all_settings = await self.list()\n for model_settings in all_settings:\n if model_settings.name == name:\n # TODO: Implement version policy\n return model_settings\n\n raise ModelNotFound(name)\n", "path": "mlserver/repository.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass Settings(BaseSettings):\n class Config:\n 
env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository folder\n model_repository_root: str = \".\"\n load_models_at_startup: bool = True\n\n # Server metadata\n server_name: str = \"mlserver\"\n server_version: str = __version__\n extensions: List[str] = []\n\n # Server settings\n host: str = \"0.0.0.0\"\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can include things like model weights.\n The main difference with respect to ModelSettings is that parameters can\n change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n version: Optional[str] = None\n format: Optional[str] = None\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n name: str\n\n # Model metadata\n platform: str = \"\"\n versions: Optional[List[str]] = []\n inputs: Optional[List[MetadataTensor]] = []\n outputs: Optional[List[MetadataTensor]] = []\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n", "path": "mlserver/settings.py"}]}
1,721
231
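The MLServer patch above defaults a model's name to the folder that holds its `model-settings.json`, so artifacts no longer have to spell the name out. Below is a hedged, standard-library-only sketch of that fallback; the dataclass stands in for the real pydantic `ModelSettings`.

```python
import os
from dataclasses import dataclass


@dataclass
class ModelSettings:
    # Empty string means "not specified", matching the `name: str = ""` default in the diff.
    name: str = ""


def apply_default_name(settings: ModelSettings, settings_path: str) -> ModelSettings:
    if not settings.name:
        # Fall back to the name of the directory containing model-settings.json.
        settings.name = os.path.basename(os.path.dirname(settings_path))
    return settings


settings = apply_default_name(ModelSettings(), "/models/sum-model/model-settings.json")
print(settings.name)  # sum-model
```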
gh_patches_debug_3929
rasdani/github-patches
git_diff
pyload__pyload-1340
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DepositfilesCom Parse error Hi, debuglog http://pastebin.com/v0SGFL9R handleFree_line44.dump.html http://pastebin.com/2V3Rjn7M handleFree_line60.dump.html http://pastebin.com/mxPsFQgk preload_line455.dump.html http://pastebin.com/F9vBdCct TIA </issue> <code> [start of module/plugins/hoster/DepositfilesCom.py] 1 # -*- coding: utf-8 -*- 2 3 import re 4 5 from urllib import unquote 6 7 from module.plugins.internal.CaptchaService import ReCaptcha 8 from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo 9 10 11 class DepositfilesCom(SimpleHoster): 12 __name__ = "DepositfilesCom" 13 __type__ = "hoster" 14 __version__ = "0.54" 15 16 __pattern__ = r'https?://(?:www\.)?(depositfiles\.com|dfiles\.(eu|ru))(/\w{1,3})?/files/(?P<ID>\w+)' 17 __config__ = [("use_premium", "bool", "Use premium account if available", True)] 18 19 __description__ = """Depositfiles.com hoster plugin""" 20 __license__ = "GPLv3" 21 __authors__ = [("spoob", "[email protected]"), 22 ("zoidberg", "[email protected]"), 23 ("Walter Purcaro", "[email protected]")] 24 25 26 NAME_PATTERN = r'<script type="text/javascript">eval\( unescape\(\'(?P<N>.*?)\'' 27 SIZE_PATTERN = r': <b>(?P<S>[\d.,]+)&nbsp;(?P<U>[\w^_]+)</b>' 28 OFFLINE_PATTERN = r'<span class="html_download_api-not_exists"></span>' 29 30 NAME_REPLACEMENTS = [(r'\%u([0-9A-Fa-f]{4})', lambda m: unichr(int(m.group(1), 16))), 31 (r'.*<b title="(?P<N>.+?)".*', "\g<N>")] 32 URL_REPLACEMENTS = [(__pattern__ + ".*", "https://dfiles.eu/files/\g<ID>")] 33 34 COOKIES = [("dfiles.eu", "lang_current", "en")] 35 36 WAIT_PATTERN = r'(?:download_waiter_remain">|html_download_api-limit_interval">|>Please wait|>Try in).+' 37 ERROR_PATTER = r'File is checked, please try again in a minute' 38 39 LINK_FREE_PATTERN = r'<form id="downloader_file_form" action="(http://.+?\.(dfiles\.eu|depositfiles\.com)/.+?)" method="post"' 40 LINK_PREMIUM_PATTERN = r'class="repeat"><a href="(.+?)"' 41 LINK_MIRROR_PATTERN = r'class="repeat_mirror"><a href="(.+?)"' 42 43 44 def handleFree(self, pyfile): 45 self.html = self.load(pyfile.url, post={'gateway_result': "1"}) 46 47 self.checkErrors() 48 49 m = re.search(r"var fid = '(\w+)';", self.html) 50 if m is None: 51 self.retry(wait_time=5) 52 params = {'fid': m.group(1)} 53 self.logDebug("FID: %s" % params['fid']) 54 55 self.wait() 56 recaptcha = ReCaptcha(self) 57 captcha_key = recaptcha.detect_key() 58 if captcha_key is None: 59 return 60 61 self.html = self.load("https://dfiles.eu/get_file.php", get=params) 62 63 if '<input type=button value="Continue" onclick="check_recaptcha' in self.html: 64 params['response'], params['challenge'] = recaptcha.challenge(captcha_key) 65 self.html = self.load("https://dfiles.eu/get_file.php", get=params) 66 67 m = re.search(self.LINK_FREE_PATTERN, self.html) 68 if m: 69 if 'response' in params: 70 self.correctCaptcha() 71 72 self.link = unquote(m.group(1)) 73 74 75 def handlePremium(self, pyfile): 76 if '<span class="html_download_api-gold_traffic_limit">' in self.html: 77 self.logWarning(_("Download limit reached")) 78 self.retry(25, 60 * 60, "Download limit reached") 79 80 elif 'onClick="show_gold_offer' in self.html: 81 self.account.relogin(self.user) 82 self.retry() 83 84 else: 85 link = re.search(self.LINK_PREMIUM_PATTERN, self.html) 86 mirror = re.search(self.LINK_MIRROR_PATTERN, self.html) 87 88 if link: 89 self.link = link.group(1) 90 91 elif mirror: 92 self.link = mirror.group(1) 93 94 95 
getInfo = create_getInfo(DepositfilesCom) 96 [end of module/plugins/hoster/DepositfilesCom.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/module/plugins/hoster/DepositfilesCom.py b/module/plugins/hoster/DepositfilesCom.py --- a/module/plugins/hoster/DepositfilesCom.py +++ b/module/plugins/hoster/DepositfilesCom.py @@ -52,7 +52,9 @@ params = {'fid': m.group(1)} self.logDebug("FID: %s" % params['fid']) + self.setWait(60) self.wait() + recaptcha = ReCaptcha(self) captcha_key = recaptcha.detect_key() if captcha_key is None:
{"golden_diff": "diff --git a/module/plugins/hoster/DepositfilesCom.py b/module/plugins/hoster/DepositfilesCom.py\n--- a/module/plugins/hoster/DepositfilesCom.py\n+++ b/module/plugins/hoster/DepositfilesCom.py\n@@ -52,7 +52,9 @@\n params = {'fid': m.group(1)}\n self.logDebug(\"FID: %s\" % params['fid'])\n \n+ self.setWait(60)\n self.wait()\n+\n recaptcha = ReCaptcha(self)\n captcha_key = recaptcha.detect_key()\n if captcha_key is None:\n", "issue": "DepositfilesCom Parse error\nHi,\n\ndebuglog http://pastebin.com/v0SGFL9R\n\nhandleFree_line44.dump.html http://pastebin.com/2V3Rjn7M\nhandleFree_line60.dump.html http://pastebin.com/mxPsFQgk\npreload_line455.dump.html http://pastebin.com/F9vBdCct\n\nTIA\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urllib import unquote\n\nfrom module.plugins.internal.CaptchaService import ReCaptcha\nfrom module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo\n\n\nclass DepositfilesCom(SimpleHoster):\n __name__ = \"DepositfilesCom\"\n __type__ = \"hoster\"\n __version__ = \"0.54\"\n\n __pattern__ = r'https?://(?:www\\.)?(depositfiles\\.com|dfiles\\.(eu|ru))(/\\w{1,3})?/files/(?P<ID>\\w+)'\n __config__ = [(\"use_premium\", \"bool\", \"Use premium account if available\", True)]\n\n __description__ = \"\"\"Depositfiles.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"spoob\", \"[email protected]\"),\n (\"zoidberg\", \"[email protected]\"),\n (\"Walter Purcaro\", \"[email protected]\")]\n\n\n NAME_PATTERN = r'<script type=\"text/javascript\">eval\\( unescape\\(\\'(?P<N>.*?)\\''\n SIZE_PATTERN = r': <b>(?P<S>[\\d.,]+)&nbsp;(?P<U>[\\w^_]+)</b>'\n OFFLINE_PATTERN = r'<span class=\"html_download_api-not_exists\"></span>'\n\n NAME_REPLACEMENTS = [(r'\\%u([0-9A-Fa-f]{4})', lambda m: unichr(int(m.group(1), 16))),\n (r'.*<b title=\"(?P<N>.+?)\".*', \"\\g<N>\")]\n URL_REPLACEMENTS = [(__pattern__ + \".*\", \"https://dfiles.eu/files/\\g<ID>\")]\n\n COOKIES = [(\"dfiles.eu\", \"lang_current\", \"en\")]\n\n WAIT_PATTERN = r'(?:download_waiter_remain\">|html_download_api-limit_interval\">|>Please wait|>Try in).+'\n ERROR_PATTER = r'File is checked, please try again in a minute'\n\n LINK_FREE_PATTERN = r'<form id=\"downloader_file_form\" action=\"(http://.+?\\.(dfiles\\.eu|depositfiles\\.com)/.+?)\" method=\"post\"'\n LINK_PREMIUM_PATTERN = r'class=\"repeat\"><a href=\"(.+?)\"'\n LINK_MIRROR_PATTERN = r'class=\"repeat_mirror\"><a href=\"(.+?)\"'\n\n\n def handleFree(self, pyfile):\n self.html = self.load(pyfile.url, post={'gateway_result': \"1\"})\n\n self.checkErrors()\n\n m = re.search(r\"var fid = '(\\w+)';\", self.html)\n if m is None:\n self.retry(wait_time=5)\n params = {'fid': m.group(1)}\n self.logDebug(\"FID: %s\" % params['fid'])\n\n self.wait()\n recaptcha = ReCaptcha(self)\n captcha_key = recaptcha.detect_key()\n if captcha_key is None:\n return\n\n self.html = self.load(\"https://dfiles.eu/get_file.php\", get=params)\n\n if '<input type=button value=\"Continue\" onclick=\"check_recaptcha' in self.html:\n params['response'], params['challenge'] = recaptcha.challenge(captcha_key)\n self.html = self.load(\"https://dfiles.eu/get_file.php\", get=params)\n\n m = re.search(self.LINK_FREE_PATTERN, self.html)\n if m:\n if 'response' in params:\n self.correctCaptcha()\n\n self.link = unquote(m.group(1))\n\n\n def handlePremium(self, pyfile):\n if '<span class=\"html_download_api-gold_traffic_limit\">' in self.html:\n self.logWarning(_(\"Download limit reached\"))\n self.retry(25, 60 * 60, \"Download 
limit reached\")\n\n elif 'onClick=\"show_gold_offer' in self.html:\n self.account.relogin(self.user)\n self.retry()\n\n else:\n link = re.search(self.LINK_PREMIUM_PATTERN, self.html)\n mirror = re.search(self.LINK_MIRROR_PATTERN, self.html)\n\n if link:\n self.link = link.group(1)\n\n elif mirror:\n self.link = mirror.group(1)\n\n\ngetInfo = create_getInfo(DepositfilesCom)\n", "path": "module/plugins/hoster/DepositfilesCom.py"}]}
1,781
128
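The pyload fix above is essentially an ordering change: set an explicit wait time before calling `wait()`, instead of waiting on a value the page parse never produced. A rough illustration of that ordering with a stubbed hoster class (this is not pyload's actual plugin API, only the shape of it):

```python
import time


class HosterStub:
    """Tiny stand-in for the relevant slice of pyload's hoster interface."""

    def __init__(self) -> None:
        self._wait_seconds = 0

    def setWait(self, seconds: int) -> None:
        self._wait_seconds = seconds

    def wait(self) -> None:
        # Without a preceding setWait() this is effectively a no-op and the
        # captcha/download step fires before the site expects it.
        time.sleep(self._wait_seconds)


hoster = HosterStub()
hoster.setWait(1)  # the actual patch uses 60 seconds inside handleFree()
hoster.wait()
```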
gh_patches_debug_11494
rasdani/github-patches
git_diff
litestar-org__litestar-610
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: Picologging not behaving correctly **Describe the bug** i reported an earlier issue to @peterschutt about logging configuration not visible in the console output. that was fixed in https://github.com/starlite-api/starlite/pull/594 i now wanted to test this out with picologging but it seems to override the LoggingConfig settings. **To Reproduce** checkout https://github.com/starlite-api/starlite-pg-redis-docker and add picologging to the application. then you see the behaviour i describe here. nothing else changes so only add picologging. **Additional context** you can see the behaviour already at starting the project with docker-compose up. without picologging in project: app_1 | Starting Starlite App... app_1 | INFO: Will watch for changes in these directories: ['/app'] app_1 | INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) app_1 | INFO: Started reloader process [10] using WatchFiles app_1 | INFO: Started server process [12] app_1 | INFO: Waiting for application startup. app_1 | 2022-10-16 18:38:14,888 loglevel=INFO logger=uvicorn.error serve() L75 Started server process [12] app_1 | 2022-10-16 18:38:14,888 loglevel=INFO logger=uvicorn.error startup() L47 Waiting for application startup. app_1 | INFO: Application startup complete. app_1 | 2022-10-16 18:38:14,908 loglevel=INFO logger=uvicorn.error startup() L61 Application startup complete. with picologging in project: app_1 | Starting Starlite App... app_1 | INFO: Will watch for changes in these directories: ['/app'] app_1 | INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) app_1 | INFO: Started reloader process [10] using WatchFiles app_1 | INFO: Started server process [12] app_1 | INFO: Waiting for application startup. app_1 | INFO: Application startup complete. i tried looking at the documentation of picologging https://microsoft.github.io/picologging/handlers.html section Queue Listener but it seems to be correct. at this point im lacking knowledge in the logging department to troubleshoot this deeper. Kind regards, Niels </issue> <code> [start of starlite/logging/picologging.py] 1 import atexit 2 from logging import StreamHandler 3 from queue import Queue 4 from typing import Any, List, Optional 5 6 from starlite.exceptions import MissingDependencyException 7 from starlite.logging.utils import resolve_handlers 8 9 try: 10 from picologging.handlers import QueueHandler, QueueListener 11 except ImportError as e: 12 raise MissingDependencyException("picologging is not installed") from e 13 14 15 class QueueListenerHandler(QueueHandler): # type: ignore[misc] 16 def __init__(self, handlers: Optional[List[Any]] = None) -> None: 17 """Configures queue listener and handler to support non-blocking 18 logging configuration. 19 20 Args: 21 handlers: Optional 'ConvertingList' 22 23 Notes: 24 - Requires `picologging` to be installed. 25 """ 26 super().__init__(Queue(-1)) 27 if handlers: 28 handlers = resolve_handlers(handlers) 29 else: 30 handlers = [StreamHandler()] 31 self.listener = QueueListener(self.queue, *handlers) 32 self.listener.start() 33 34 atexit.register(self.listener.stop) 35 [end of starlite/logging/picologging.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlite/logging/picologging.py b/starlite/logging/picologging.py --- a/starlite/logging/picologging.py +++ b/starlite/logging/picologging.py @@ -1,5 +1,4 @@ import atexit -from logging import StreamHandler from queue import Queue from typing import Any, List, Optional @@ -7,6 +6,7 @@ from starlite.logging.utils import resolve_handlers try: + from picologging import StreamHandler from picologging.handlers import QueueHandler, QueueListener except ImportError as e: raise MissingDependencyException("picologging is not installed") from e
{"golden_diff": "diff --git a/starlite/logging/picologging.py b/starlite/logging/picologging.py\n--- a/starlite/logging/picologging.py\n+++ b/starlite/logging/picologging.py\n@@ -1,5 +1,4 @@\n import atexit\n-from logging import StreamHandler\n from queue import Queue\n from typing import Any, List, Optional\n \n@@ -7,6 +6,7 @@\n from starlite.logging.utils import resolve_handlers\n \n try:\n+ from picologging import StreamHandler\n from picologging.handlers import QueueHandler, QueueListener\n except ImportError as e:\n raise MissingDependencyException(\"picologging is not installed\") from e\n", "issue": "Bug: Picologging not behaving correctly \n**Describe the bug**\r\ni reported an earlier issue to @peterschutt about logging configuration not visible in the console output.\r\nthat was fixed in https://github.com/starlite-api/starlite/pull/594\r\n\r\ni now wanted to test this out with picologging but it seems to override the LoggingConfig settings.\r\n\r\n**To Reproduce**\r\ncheckout https://github.com/starlite-api/starlite-pg-redis-docker and add picologging to the application.\r\nthen you see the behaviour i describe here.\r\n\r\nnothing else changes so only add picologging.\r\n\r\n**Additional context**\r\nyou can see the behaviour already at starting the project with docker-compose up.\r\n\r\nwithout picologging in project:\r\n\r\napp_1 | Starting Starlite App...\r\napp_1 | INFO: Will watch for changes in these directories: ['/app']\r\napp_1 | INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)\r\napp_1 | INFO: Started reloader process [10] using WatchFiles\r\napp_1 | INFO: Started server process [12]\r\napp_1 | INFO: Waiting for application startup.\r\napp_1 | 2022-10-16 18:38:14,888 loglevel=INFO logger=uvicorn.error serve() L75 Started server process [12]\r\napp_1 | 2022-10-16 18:38:14,888 loglevel=INFO logger=uvicorn.error startup() L47 Waiting for application startup.\r\napp_1 | INFO: Application startup complete.\r\napp_1 | 2022-10-16 18:38:14,908 loglevel=INFO logger=uvicorn.error startup() L61 Application startup complete.\r\n\r\nwith picologging in project:\r\n\r\napp_1 | Starting Starlite App...\r\napp_1 | INFO: Will watch for changes in these directories: ['/app']\r\napp_1 | INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)\r\napp_1 | INFO: Started reloader process [10] using WatchFiles\r\napp_1 | INFO: Started server process [12]\r\napp_1 | INFO: Waiting for application startup.\r\napp_1 | INFO: Application startup complete.\r\n\r\n\r\ni tried looking at the documentation of picologging https://microsoft.github.io/picologging/handlers.html section Queue Listener but it seems to be correct. 
at this point im lacking knowledge in the logging department to troubleshoot this deeper.\r\n\r\nKind regards,\r\nNiels\r\n\n", "before_files": [{"content": "import atexit\nfrom logging import StreamHandler\nfrom queue import Queue\nfrom typing import Any, List, Optional\n\nfrom starlite.exceptions import MissingDependencyException\nfrom starlite.logging.utils import resolve_handlers\n\ntry:\n from picologging.handlers import QueueHandler, QueueListener\nexcept ImportError as e:\n raise MissingDependencyException(\"picologging is not installed\") from e\n\n\nclass QueueListenerHandler(QueueHandler): # type: ignore[misc]\n def __init__(self, handlers: Optional[List[Any]] = None) -> None:\n \"\"\"Configures queue listener and handler to support non-blocking\n logging configuration.\n\n Args:\n handlers: Optional 'ConvertingList'\n\n Notes:\n - Requires `picologging` to be installed.\n \"\"\"\n super().__init__(Queue(-1))\n if handlers:\n handlers = resolve_handlers(handlers)\n else:\n handlers = [StreamHandler()]\n self.listener = QueueListener(self.queue, *handlers)\n self.listener.start()\n\n atexit.register(self.listener.stop)\n", "path": "starlite/logging/picologging.py"}]}
1,440
144
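The litestar/starlite record comes down to importing `StreamHandler` from `picologging` itself rather than from the standard `logging` module, so the queue listener emits through handlers that match picologging's record type. A trimmed-down sketch of the corrected wiring is below; it assumes `picologging` is installed and drops starlite's internal `resolve_handlers` helper.

```python
import atexit
from queue import Queue

from picologging import StreamHandler
from picologging.handlers import QueueHandler, QueueListener


class QueueListenerHandler(QueueHandler):
    """Non-blocking logging: records go onto a queue, a listener thread emits them."""

    def __init__(self, handlers=None) -> None:
        super().__init__(Queue(-1))
        # Default to picologging's own StreamHandler, not logging.StreamHandler.
        handlers = handlers or [StreamHandler()]
        self.listener = QueueListener(self.queue, *handlers)
        self.listener.start()
        atexit.register(self.listener.stop)
```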
gh_patches_debug_34436
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-1720
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ebola Page: title case, please For example, change `Appeal coverage` to `Appeal Coverage` ![image](https://cloud.githubusercontent.com/assets/1654485/4974867/41867a20-68d1-11e4-92c9-e8956b974d8d.png) </issue> <code> [start of ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py] 1 ''' 2 Created on Nov 3, 2014 3 4 @author: alexandru-m-g 5 ''' 6 7 import datetime as dt 8 9 import pylons.config as config 10 import logging 11 12 import ckan.lib.base as base 13 import ckan.logic as logic 14 import ckan.model as model 15 import ckan.common as common 16 import ckan.lib.helpers as h 17 18 render = base.render 19 get_action = logic.get_action 20 c = common.c 21 request = common.request 22 23 log = logging.getLogger(__name__) 24 25 26 class CrisisController(base.BaseController): 27 28 def show(self): 29 30 context = {'model': model, 'session': model.Session, 31 'user': c.user or c.author, 'for_view': True, 32 'auth_user_obj': c.userobj} 33 34 datastore_resource_id = self._get_datastore_resource_id( 35 context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None)) 36 if datastore_resource_id: 37 c.top_line_items = self._get_top_line_items( 38 context, datastore_resource_id) 39 40 limit = 25 41 c.q = u'ebola' 42 43 page = int(request.params.get('page', 1)) 44 data_dict = {'sort': u'metadata_modified desc', 45 'fq': '+dataset_type:dataset', 46 'rows': limit, 47 'q': c.q, 48 'start': (page - 1) * limit 49 } 50 query = get_action("package_search")(context, data_dict) 51 52 def pager_url(q=None, page=None): 53 return h.url_for('show_crisis', page=page) 54 55 c.page = h.Page( 56 collection=query['results'], 57 page=page, 58 url=pager_url, 59 item_count=query['count'], 60 items_per_page=limit 61 ) 62 c.items = query['results'] 63 c.item_count = query['count'] 64 65 c.other_links = {} 66 c.other_links['show_more'] = h.url_for( 67 "search", **{'q': u'ebola', 'sort': u'metadata_modified desc', 68 'ext_indicator': '0'}) 69 70 return render('crisis/crisis.html') 71 72 def _get_top_line_items(self, context, datastore_resource_id): 73 modified_context = dict(context) 74 modified_context['ignore_auth'] = True 75 result = get_action('datastore_search')( 76 modified_context, {'resource_id': datastore_resource_id}) 77 if 'records' in result: 78 for r in result['records']: 79 d = dt.datetime.strptime( 80 r[u'latest_date'], '%Y-%m-%dT%H:%M:%S') 81 r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y') 82 int_value = int(r[u'value']) 83 if int_value == r[u'value']: 84 r[u'value'] = '{:,}'.format(int_value) 85 else: 86 r[u'value'] = '{:,}'.format(r[u'value']) 87 return result['records'] 88 return [] 89 90 def _get_datastore_resource_id(self, context, dataset_id, resource_name): 91 try: 92 modified_context = dict(context) 93 modified_context['ignore_auth'] = True 94 dataset = get_action('package_show')( 95 modified_context, {'id': dataset_id}) 96 97 if 'resources' in dataset: 98 for r in dataset['resources']: 99 if 'datastore_active' in r and r['datastore_active'] \ 100 and r['name'] == resource_name: 101 return r['id'] 102 return None 103 except: 104 log.warning('No dataset with id ' + dataset_id) 105 return None 106 [end of ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py] [start of ckanext-hdx_theme/ckanext/hdx_theme/version.py] 1 hdx_version = 'v0.4.7' 2 [end of ckanext-hdx_theme/ckanext/hdx_theme/version.py] </code> I need 
you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py --- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py +++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py @@ -19,6 +19,7 @@ get_action = logic.get_action c = common.c request = common.request +_ = common._ log = logging.getLogger(__name__) @@ -69,21 +70,34 @@ return render('crisis/crisis.html') + def _format_results(self, result): + for r in result['records']: + d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S') + r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y') + + modified_value = r[u'value'] + if r[u'units'] == 'ratio': + modified_value *= 100.0 + elif r[u'units'] == 'million': + modified_value /= 1000000.0 + + int_value = int(modified_value) + if int_value == modified_value: + r[u'formatted_value'] = '{:,}'.format(int_value) + else: + if r[u'units'] == 'ratio': + r[u'formatted_value'] = '{:,.1f}%'.format(modified_value) + elif r[u'units'] == 'million': + r[u'formatted_value'] = '{:,.1f} '.format(modified_value) + r[u'formatted_value'] += ' ' + _('million') + def _get_top_line_items(self, context, datastore_resource_id): modified_context = dict(context) modified_context['ignore_auth'] = True result = get_action('datastore_search')( modified_context, {'resource_id': datastore_resource_id}) if 'records' in result: - for r in result['records']: - d = dt.datetime.strptime( - r[u'latest_date'], '%Y-%m-%dT%H:%M:%S') - r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y') - int_value = int(r[u'value']) - if int_value == r[u'value']: - r[u'value'] = '{:,}'.format(int_value) - else: - r[u'value'] = '{:,}'.format(r[u'value']) + self._format_results(result) return result['records'] return [] diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py --- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py +++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py @@ -1 +1 @@ -hdx_version = 'v0.4.7' +hdx_version = 'v0.4.8'
{"golden_diff": "diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n@@ -19,6 +19,7 @@\n get_action = logic.get_action\n c = common.c\n request = common.request\n+_ = common._\n \n log = logging.getLogger(__name__)\n \n@@ -69,21 +70,34 @@\n \n return render('crisis/crisis.html')\n \n+ def _format_results(self, result):\n+ for r in result['records']:\n+ d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n+ r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n+\n+ modified_value = r[u'value']\n+ if r[u'units'] == 'ratio':\n+ modified_value *= 100.0\n+ elif r[u'units'] == 'million':\n+ modified_value /= 1000000.0\n+\n+ int_value = int(modified_value)\n+ if int_value == modified_value:\n+ r[u'formatted_value'] = '{:,}'.format(int_value)\n+ else:\n+ if r[u'units'] == 'ratio':\n+ r[u'formatted_value'] = '{:,.1f}%'.format(modified_value)\n+ elif r[u'units'] == 'million':\n+ r[u'formatted_value'] = '{:,.1f} '.format(modified_value)\n+ r[u'formatted_value'] += ' ' + _('million')\n+\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n- for r in result['records']:\n- d = dt.datetime.strptime(\n- r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n- r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n- int_value = int(r[u'value'])\n- if int_value == r[u'value']:\n- r[u'value'] = '{:,}'.format(int_value)\n- else:\n- r[u'value'] = '{:,}'.format(r[u'value'])\n+ self._format_results(result)\n return result['records']\n return []\n \ndiff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.4.7'\n+hdx_version = 'v0.4.8'\n", "issue": "Ebola Page: title case, please\nFor example, change `Appeal coverage` to `Appeal Coverage`\n\n![image](https://cloud.githubusercontent.com/assets/1654485/4974867/41867a20-68d1-11e4-92c9-e8956b974d8d.png)\n\n", "before_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport datetime as dt\n\nimport pylons.config as config\nimport logging\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': 
c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n for r in result['records']:\n d = dt.datetime.strptime(\n r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n int_value = int(r[u'value'])\n if int_value == r[u'value']:\n r[u'value'] = '{:,}'.format(int_value)\n else:\n r[u'value'] = '{:,}'.format(r[u'value'])\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}, {"content": "hdx_version = 'v0.4.7'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]}
1,711
727
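Beyond the title-case request, most of the HDX diff above is a small value-formatting rule set: ratios are shown as percentages and "million" units are scaled and suffixed. Here is a standalone sketch of that routine, written against plain numbers rather than CKAN datastore records; the rounding rules loosely mirror the patch rather than copying it.

```python
def format_value(value: float, units: str) -> str:
    """Turn a raw top-line figure into a display string."""
    if units == "ratio":
        value *= 100.0
    elif units == "million":
        value /= 1_000_000.0

    if value == int(value):
        text = "{:,}".format(int(value))
    else:
        text = "{:,.1f}".format(value)

    if units == "ratio":
        text += "%"
    elif units == "million":
        text += " million"
    return text


print(format_value(0.305, "ratio"))        # 30.5%
print(format_value(4_500_000, "million"))  # 4.5 million
print(format_value(1234, "count"))         # 1,234
```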
gh_patches_debug_31493
rasdani/github-patches
git_diff
akvo__akvo-rsr-2120
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Employment application requires Django login ## Test plan GIVEN the `/rest/v1/user/../request_organisation/` endpoint WHEN POSTing a request from outside of RSR AND the correct authentication details are provided THEN the request should be process accordingly, instead trying to authenticate through Django's auth system ## Issue description Just as for #2037, when posting a request for employment to the /rest/v1/user/request_organisation/ endpoint, a Django login is required, which is not the case for the rest of the API; an API key should be sufficient. </issue> <code> [start of akvo/rest/views/user.py] 1 # -*- coding: utf-8 -*- 2 """Akvo RSR is covered by the GNU Affero General Public License. 3 4 See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 """ 7 8 from django.contrib.auth import get_user_model 9 from django.db import IntegrityError 10 from django.utils.translation import ugettext_lazy as _ 11 12 from rest_framework import status 13 from rest_framework.decorators import api_view, permission_classes 14 from rest_framework.exceptions import PermissionDenied 15 from rest_framework.response import Response 16 17 from ...rsr.models import Country, Employment, Organisation 18 from ..viewsets import BaseRSRViewSet 19 from ..serializers import EmploymentSerializer, OrganisationSerializer, CountrySerializer 20 from ..serializers import UserSerializer, UserDetailsSerializer, UserPasswordSerializer 21 22 23 class UserViewSet(BaseRSRViewSet): 24 25 """User resource.""" 26 27 queryset = get_user_model().objects.select_related( 28 'organisation', 29 'organisation__primary_location', 30 'organisation__primary_location__country', 31 'organisation__primary_location__location_target', 32 'organisation__primary_location__location_target__internal_org_ids', 33 ).prefetch_related( 34 'organisations', 35 'organisations__primary_location', 36 'organisations__primary_location__country', 37 'organisations__primary_location__location_target',) 38 serializer_class = UserSerializer 39 filter_fields = ('username', 'email', 'first_name', 'last_name', 'is_active', 'is_staff', 40 'is_admin') 41 42 43 @api_view(['POST']) 44 def change_password(request, pk=None): 45 # Get the user, or return an error if the user does not exist 46 try: 47 user = get_user_model().objects.get(pk=pk) 48 except get_user_model().DoesNotExist: 49 return Response({'user': _('User does not exist')}, status=status.HTTP_400_BAD_REQUEST) 50 51 # Users are only allowed to edit their own details 52 request_user = getattr(request, 'user', None) 53 if not user == request_user: 54 raise PermissionDenied() 55 56 # Process request 57 serializer = UserPasswordSerializer(data=request.DATA, instance=user) 58 if serializer.is_valid(): 59 user.set_password(serializer.data['new_password2']) 60 user.save() 61 return Response({'status': 'password set'}) 62 else: 63 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) 64 65 66 @api_view(['POST']) 67 def update_details(request, pk=None): 68 # Get the user, or return an error if the user does not exist 69 try: 70 user = get_user_model().objects.get(pk=pk) 71 except get_user_model().DoesNotExist: 72 return Response({'user': _('User does not exist')}, status=status.HTTP_400_BAD_REQUEST) 73 74 # Users are only allowed to edit their own details 75 
request_user = getattr(request, 'user', None) 76 if not user == request_user: 77 raise PermissionDenied() 78 79 # Process request 80 serializer = UserDetailsSerializer(data=request.DATA, instance=user) 81 if serializer.is_valid(): 82 user.first_name = serializer.data['first_name'] 83 user.last_name = serializer.data['last_name'] 84 user.save() 85 return Response(request.DATA) 86 else: 87 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) 88 89 90 @api_view(['POST']) 91 def request_organisation(request, pk=None): 92 # Get the user, or return an error if the user does not exist 93 try: 94 user = get_user_model().objects.get(pk=pk) 95 except get_user_model().DoesNotExist: 96 return Response({'user': _('User does not exist')}, status=status.HTTP_400_BAD_REQUEST) 97 98 # Users themselves are only allowed to request to join an organisation 99 request_user = getattr(request, 'user', None) 100 if not user == request_user: 101 raise PermissionDenied() 102 request.DATA['user'] = pk 103 104 # Process request 105 serializer = EmploymentSerializer(data=request.DATA) 106 if serializer.is_valid(): 107 try: 108 organisation = Organisation.objects.get(pk=serializer.data['organisation']) 109 if serializer.data['country']: 110 country = Country.objects.get(pk=serializer.data['country']) 111 else: 112 country = None 113 employment = Employment( 114 user=user, 115 organisation=organisation, 116 country=country, 117 job_title=serializer.data['job_title'], 118 is_approved=False, 119 ) 120 employment.save() 121 except IntegrityError: 122 return Response({'detail': _(u'User already linked to this organisation')}, 123 status=status.HTTP_409_CONFLICT) 124 125 serializer.data['country_full'] = CountrySerializer(country).data 126 serializer.data['organisation_full'] = OrganisationSerializer(organisation).data 127 serializer.data['id'] = employment.pk 128 129 return Response(serializer.data) 130 else: 131 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) 132 [end of akvo/rest/views/user.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/user.py b/akvo/rest/views/user.py --- a/akvo/rest/views/user.py +++ b/akvo/rest/views/user.py @@ -10,10 +10,12 @@ from django.utils.translation import ugettext_lazy as _ from rest_framework import status -from rest_framework.decorators import api_view, permission_classes +from rest_framework.authentication import SessionAuthentication +from rest_framework.decorators import api_view, permission_classes, authentication_classes from rest_framework.exceptions import PermissionDenied from rest_framework.response import Response +from akvo.rest.models import TastyTokenAuthentication from ...rsr.models import Country, Employment, Organisation from ..viewsets import BaseRSRViewSet from ..serializers import EmploymentSerializer, OrganisationSerializer, CountrySerializer @@ -88,16 +90,19 @@ @api_view(['POST']) +@authentication_classes([SessionAuthentication, TastyTokenAuthentication]) def request_organisation(request, pk=None): + # Get the user, or return an error if the user does not exist try: - user = get_user_model().objects.get(pk=pk) + user_by_pk = get_user_model().objects.get(pk=pk) except get_user_model().DoesNotExist: return Response({'user': _('User does not exist')}, status=status.HTTP_400_BAD_REQUEST) + # request.user is the user identified by the auth token + user = request.user # Users themselves are only allowed to request to join an organisation - request_user = getattr(request, 'user', None) - if not user == request_user: + if not user_by_pk == request.user: raise PermissionDenied() request.DATA['user'] = pk
{"golden_diff": "diff --git a/akvo/rest/views/user.py b/akvo/rest/views/user.py\n--- a/akvo/rest/views/user.py\n+++ b/akvo/rest/views/user.py\n@@ -10,10 +10,12 @@\n from django.utils.translation import ugettext_lazy as _\n \n from rest_framework import status\n-from rest_framework.decorators import api_view, permission_classes\n+from rest_framework.authentication import SessionAuthentication\n+from rest_framework.decorators import api_view, permission_classes, authentication_classes\n from rest_framework.exceptions import PermissionDenied\n from rest_framework.response import Response\n \n+from akvo.rest.models import TastyTokenAuthentication\n from ...rsr.models import Country, Employment, Organisation\n from ..viewsets import BaseRSRViewSet\n from ..serializers import EmploymentSerializer, OrganisationSerializer, CountrySerializer\n@@ -88,16 +90,19 @@\n \n \n @api_view(['POST'])\n+@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\n def request_organisation(request, pk=None):\n+\n # Get the user, or return an error if the user does not exist\n try:\n- user = get_user_model().objects.get(pk=pk)\n+ user_by_pk = get_user_model().objects.get(pk=pk)\n except get_user_model().DoesNotExist:\n return Response({'user': _('User does not exist')}, status=status.HTTP_400_BAD_REQUEST)\n \n+ # request.user is the user identified by the auth token\n+ user = request.user\n # Users themselves are only allowed to request to join an organisation\n- request_user = getattr(request, 'user', None)\n- if not user == request_user:\n+ if not user_by_pk == request.user:\n raise PermissionDenied()\n request.DATA['user'] = pk\n", "issue": "Employment application requires Django login\n## Test plan\n\nGIVEN the `/rest/v1/user/../request_organisation/` endpoint\nWHEN POSTing a request from outside of RSR\nAND the correct authentication details are provided\nTHEN the request should be process accordingly, instead trying to authenticate through Django's auth system\n## Issue description\n\nJust as for #2037, when posting a request for employment to the /rest/v1/user/request_organisation/ endpoint, a Django login is required, which is not the case for the rest of the API; an API key should be sufficient.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import IntegrityError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import Response\n\nfrom ...rsr.models import Country, Employment, Organisation\nfrom ..viewsets import BaseRSRViewSet\nfrom ..serializers import EmploymentSerializer, OrganisationSerializer, CountrySerializer\nfrom ..serializers import UserSerializer, UserDetailsSerializer, UserPasswordSerializer\n\n\nclass UserViewSet(BaseRSRViewSet):\n\n \"\"\"User resource.\"\"\"\n\n queryset = get_user_model().objects.select_related(\n 'organisation',\n 'organisation__primary_location',\n 'organisation__primary_location__country',\n 'organisation__primary_location__location_target',\n 'organisation__primary_location__location_target__internal_org_ids',\n 
).prefetch_related(\n 'organisations',\n 'organisations__primary_location',\n 'organisations__primary_location__country',\n 'organisations__primary_location__location_target',)\n serializer_class = UserSerializer\n filter_fields = ('username', 'email', 'first_name', 'last_name', 'is_active', 'is_staff',\n 'is_admin')\n\n\n@api_view(['POST'])\ndef change_password(request, pk=None):\n # Get the user, or return an error if the user does not exist\n try:\n user = get_user_model().objects.get(pk=pk)\n except get_user_model().DoesNotExist:\n return Response({'user': _('User does not exist')}, status=status.HTTP_400_BAD_REQUEST)\n\n # Users are only allowed to edit their own details\n request_user = getattr(request, 'user', None)\n if not user == request_user:\n raise PermissionDenied()\n\n # Process request\n serializer = UserPasswordSerializer(data=request.DATA, instance=user)\n if serializer.is_valid():\n user.set_password(serializer.data['new_password2'])\n user.save()\n return Response({'status': 'password set'})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\ndef update_details(request, pk=None):\n # Get the user, or return an error if the user does not exist\n try:\n user = get_user_model().objects.get(pk=pk)\n except get_user_model().DoesNotExist:\n return Response({'user': _('User does not exist')}, status=status.HTTP_400_BAD_REQUEST)\n\n # Users are only allowed to edit their own details\n request_user = getattr(request, 'user', None)\n if not user == request_user:\n raise PermissionDenied()\n\n # Process request\n serializer = UserDetailsSerializer(data=request.DATA, instance=user)\n if serializer.is_valid():\n user.first_name = serializer.data['first_name']\n user.last_name = serializer.data['last_name']\n user.save()\n return Response(request.DATA)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\ndef request_organisation(request, pk=None):\n # Get the user, or return an error if the user does not exist\n try:\n user = get_user_model().objects.get(pk=pk)\n except get_user_model().DoesNotExist:\n return Response({'user': _('User does not exist')}, status=status.HTTP_400_BAD_REQUEST)\n\n # Users themselves are only allowed to request to join an organisation\n request_user = getattr(request, 'user', None)\n if not user == request_user:\n raise PermissionDenied()\n request.DATA['user'] = pk\n\n # Process request\n serializer = EmploymentSerializer(data=request.DATA)\n if serializer.is_valid():\n try:\n organisation = Organisation.objects.get(pk=serializer.data['organisation'])\n if serializer.data['country']:\n country = Country.objects.get(pk=serializer.data['country'])\n else:\n country = None\n employment = Employment(\n user=user,\n organisation=organisation,\n country=country,\n job_title=serializer.data['job_title'],\n is_approved=False,\n )\n employment.save()\n except IntegrityError:\n return Response({'detail': _(u'User already linked to this organisation')},\n status=status.HTTP_409_CONFLICT)\n\n serializer.data['country_full'] = CountrySerializer(country).data\n serializer.data['organisation_full'] = OrganisationSerializer(organisation).data\n serializer.data['id'] = employment.pk\n\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n", "path": "akvo/rest/views/user.py"}]}
1,991
377
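A minimal sketch of the authentication pattern the golden diff above applies to `request_organisation`: token or session authentication attached to the DRF function view, plus a comparison of the user named in the URL against `request.user`. `TastyTokenAuthentication` is the project's own class, imported exactly as in the diff; everything else is stock Django REST Framework, and the trailing employment-creation logic is elided.

```python
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import api_view, authentication_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response

from akvo.rest.models import TastyTokenAuthentication  # project-specific class, as in the diff


@api_view(['POST'])
@authentication_classes([SessionAuthentication, TastyTokenAuthentication])
def request_organisation(request, pk=None):
    # The user named in the URL must exist ...
    try:
        user_by_pk = get_user_model().objects.get(pk=pk)
    except get_user_model().DoesNotExist:
        return Response({'user': 'User does not exist'},
                        status=status.HTTP_400_BAD_REQUEST)
    # ... and must be the same user the token (or session) authenticated:
    # users may only request to join an organisation for themselves.
    if user_by_pk != request.user:
        raise PermissionDenied()
    # employment creation continues as in the record above
```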
gh_patches_debug_14296
rasdani/github-patches
git_diff
DataDog__dd-trace-py-2384
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> grpc client fails when parse_method_path() does not return a value for package ### Which version of dd-trace-py are you using? ddtrace==0.48.0 ### Which version of pip are you using? pip 21.1 ### Which version of the libraries are you using? ddtrace==0.48.0 ### How can we reproduce your problem? parse_method_path() can [return None](https://github.com/DataDog/dd-trace-py/blob/7f817089fac3efc2d1204d347e5d21abe344445c/ddtrace/contrib/grpc/utils.py#L12) for the `package` value. This is then passed to [span._set_str_tag()](https://github.com/DataDog/dd-trace-py/blob/7f817089fac3efc2d1204d347e5d21abe344445c/ddtrace/contrib/grpc/client_interceptor.py#L184), which expects the value to always be `str`. ### What is the result that you get? ```2021-04-29 12:50:27 [6] [ERROR] grpc._server Exception calling application: not expecting type '<class 'NoneType'>' Traceback (most recent call last): File "/pyenv/versions/app_venv/lib/python3.7/site-packages/grpc/_server.py", line 435, in _call_behavior response_or_iterator = behavior(argument, context) File "/pyenv/versions/app_venv/lib/python3.7/site-packages/ddtrace/contrib/grpc/server_interceptor.py", line 112, in unary_unary return self._fn(constants.GRPC_METHOD_KIND_UNARY, self.__wrapped__.unary_unary, args, kwargs) File "/pyenv/versions/app_venv/lib/python3.7/site-packages/ddtrace/contrib/grpc/server_interceptor.py", line 79, in _fn span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package) File "/pyenv/versions/app_venv/lib/python3.7/site-packages/ddtrace/span.py", line 318, in _set_str_tag self.meta[key] = ensure_text(value, errors="replace") File "/pyenv/versions/app_venv/lib/python3.7/site-packages/ddtrace/vendor/six/__init__.py", line 937, in ensure_text raise TypeError("not expecting type '%s'" % type(s)) TypeError: not expecting type '<class 'NoneType'>' ``` ### What is the result that you expected? The interceptor does not raise exceptions when the value is not available. The issue seems to be surfaces as a result of #2218. it's not clear if the method package should be omitted or converted to a string as done in some of the other integrations edited in that PR. </issue> <code> [start of ddtrace/contrib/grpc/utils.py] 1 from . 
import constants 2 3 4 def parse_method_path(method_path): 5 """ Returns (package, service, method) tuple from parsing method path """ 6 # unpack method path based on "/{package}.{service}/{method}" 7 # first remove leading "/" as unnecessary 8 package_service, method_name = method_path.lstrip("/").rsplit("/", 1) 9 10 # {package} is optional 11 package_service = package_service.rsplit(".", 1) 12 if len(package_service) == 2: 13 return package_service[0], package_service[1], method_name 14 15 return None, package_service[0], method_name 16 17 18 def set_grpc_method_meta(span, method, method_kind): 19 method_path = method 20 method_package, method_service, method_name = parse_method_path(method_path) 21 span._set_str_tag(constants.GRPC_METHOD_PATH_KEY, method_path) 22 span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package) 23 span._set_str_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service) 24 span._set_str_tag(constants.GRPC_METHOD_NAME_KEY, method_name) 25 span._set_str_tag(constants.GRPC_METHOD_KIND_KEY, method_kind) 26 [end of ddtrace/contrib/grpc/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/contrib/grpc/utils.py b/ddtrace/contrib/grpc/utils.py --- a/ddtrace/contrib/grpc/utils.py +++ b/ddtrace/contrib/grpc/utils.py @@ -19,7 +19,8 @@ method_path = method method_package, method_service, method_name = parse_method_path(method_path) span._set_str_tag(constants.GRPC_METHOD_PATH_KEY, method_path) - span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package) + if method_package is not None: + span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package) span._set_str_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service) span._set_str_tag(constants.GRPC_METHOD_NAME_KEY, method_name) span._set_str_tag(constants.GRPC_METHOD_KIND_KEY, method_kind)
{"golden_diff": "diff --git a/ddtrace/contrib/grpc/utils.py b/ddtrace/contrib/grpc/utils.py\n--- a/ddtrace/contrib/grpc/utils.py\n+++ b/ddtrace/contrib/grpc/utils.py\n@@ -19,7 +19,8 @@\n method_path = method\n method_package, method_service, method_name = parse_method_path(method_path)\n span._set_str_tag(constants.GRPC_METHOD_PATH_KEY, method_path)\n- span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)\n+ if method_package is not None:\n+ span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)\n span._set_str_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service)\n span._set_str_tag(constants.GRPC_METHOD_NAME_KEY, method_name)\n span._set_str_tag(constants.GRPC_METHOD_KIND_KEY, method_kind)\n", "issue": "grpc client fails when parse_method_path() does not return a value for package\n### Which version of dd-trace-py are you using?\r\n\r\nddtrace==0.48.0\r\n\r\n### Which version of pip are you using?\r\n\r\npip 21.1\r\n\r\n### Which version of the libraries are you using?\r\n\r\nddtrace==0.48.0\r\n\r\n### How can we reproduce your problem?\r\n\r\nparse_method_path() can [return None](https://github.com/DataDog/dd-trace-py/blob/7f817089fac3efc2d1204d347e5d21abe344445c/ddtrace/contrib/grpc/utils.py#L12) for the `package` value. This is then passed to [span._set_str_tag()](https://github.com/DataDog/dd-trace-py/blob/7f817089fac3efc2d1204d347e5d21abe344445c/ddtrace/contrib/grpc/client_interceptor.py#L184), which expects the value to always be `str`.\r\n\r\n### What is the result that you get?\r\n```2021-04-29 12:50:27 [6] [ERROR] grpc._server Exception calling application: not expecting type '<class 'NoneType'>'\r\nTraceback (most recent call last):\r\n File \"/pyenv/versions/app_venv/lib/python3.7/site-packages/grpc/_server.py\", line 435, in _call_behavior\r\n response_or_iterator = behavior(argument, context)\r\n File \"/pyenv/versions/app_venv/lib/python3.7/site-packages/ddtrace/contrib/grpc/server_interceptor.py\", line 112, in unary_unary\r\n return self._fn(constants.GRPC_METHOD_KIND_UNARY, self.__wrapped__.unary_unary, args, kwargs)\r\n File \"/pyenv/versions/app_venv/lib/python3.7/site-packages/ddtrace/contrib/grpc/server_interceptor.py\", line 79, in _fn\r\n span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)\r\n File \"/pyenv/versions/app_venv/lib/python3.7/site-packages/ddtrace/span.py\", line 318, in _set_str_tag\r\n self.meta[key] = ensure_text(value, errors=\"replace\")\r\n File \"/pyenv/versions/app_venv/lib/python3.7/site-packages/ddtrace/vendor/six/__init__.py\", line 937, in ensure_text\r\n raise TypeError(\"not expecting type '%s'\" % type(s))\r\nTypeError: not expecting type '<class 'NoneType'>'\r\n```\r\n\r\n### What is the result that you expected?\r\nThe interceptor does not raise exceptions when the value is not available.\r\n\r\nThe issue seems to be surfaces as a result of #2218. it's not clear if the method package should be omitted or converted to a string as done in some of the other integrations edited in that PR.\n", "before_files": [{"content": "from . 
import constants\n\n\ndef parse_method_path(method_path):\n \"\"\" Returns (package, service, method) tuple from parsing method path \"\"\"\n # unpack method path based on \"/{package}.{service}/{method}\"\n # first remove leading \"/\" as unnecessary\n package_service, method_name = method_path.lstrip(\"/\").rsplit(\"/\", 1)\n\n # {package} is optional\n package_service = package_service.rsplit(\".\", 1)\n if len(package_service) == 2:\n return package_service[0], package_service[1], method_name\n\n return None, package_service[0], method_name\n\n\ndef set_grpc_method_meta(span, method, method_kind):\n method_path = method\n method_package, method_service, method_name = parse_method_path(method_path)\n span._set_str_tag(constants.GRPC_METHOD_PATH_KEY, method_path)\n span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)\n span._set_str_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service)\n span._set_str_tag(constants.GRPC_METHOD_NAME_KEY, method_name)\n span._set_str_tag(constants.GRPC_METHOD_KIND_KEY, method_kind)\n", "path": "ddtrace/contrib/grpc/utils.py"}]}
1,488
182
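A sketch of the None-guard the golden diff above adds to `set_grpc_method_meta`. The `constants` and `parse_method_path` names are the ones from the record (module paths inferred from its relative imports), and `span._set_str_tag` is the ddtrace span method shown in the traceback; this is not a drop-in replacement for the library, only the shape of the fix.

```python
from ddtrace.contrib.grpc import constants
from ddtrace.contrib.grpc.utils import parse_method_path


def set_grpc_method_meta(span, method, method_kind):
    method_path = method
    method_package, method_service, method_name = parse_method_path(method_path)
    span._set_str_tag(constants.GRPC_METHOD_PATH_KEY, method_path)
    # "/{package}.{service}/{method}" has an optional {package}, so this value can
    # be None; _set_str_tag() rejects None, hence the guard.
    if method_package is not None:
        span._set_str_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)
    span._set_str_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service)
    span._set_str_tag(constants.GRPC_METHOD_NAME_KEY, method_name)
    span._set_str_tag(constants.GRPC_METHOD_KIND_KEY, method_kind)
```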
gh_patches_debug_7138
rasdani/github-patches
git_diff
e-valuation__EvaP-354
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Result detail pages need to show total grades and participation The `results/course_detail` pages should again show the information that is already given on the overview page (average and medium grades and participation). </issue> <code> [start of evap/results/views.py] 1 from django.conf import settings 2 from django.http import HttpResponse 3 from django.shortcuts import get_object_or_404, render_to_response 4 from django.template import RequestContext 5 from django.utils.translation import get_language 6 7 from evap.evaluation.auth import login_required, fsr_required 8 from evap.evaluation.models import Semester 9 from evap.evaluation.tools import calculate_results, calculate_average_and_medium_grades, TextResult, can_publish_grades 10 11 from evap.results.exporters import ExcelExporter 12 13 14 @login_required 15 def index(request): 16 semesters = Semester.get_all_with_published_courses() 17 18 return render_to_response( 19 "results_index.html", 20 dict(semesters=semesters), 21 context_instance=RequestContext(request)) 22 23 24 @login_required 25 def semester_detail(request, semester_id): 26 semester = get_object_or_404(Semester, id=semester_id) 27 courses = list(semester.course_set.filter(state="published")) 28 29 # annotate each course object with its grades 30 for course in courses: 31 # first, make sure that there are no preexisting grade attributes 32 course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course) 33 course.can_publish_grades = can_publish_grades(course, request.user.is_staff) 34 35 return render_to_response( 36 "results_semester_detail.html", 37 dict( 38 semester=semester, 39 courses=courses 40 ), 41 context_instance=RequestContext(request)) 42 43 44 @fsr_required 45 def semester_export(request, semester_id): 46 semester = get_object_or_404(Semester, id=semester_id) 47 48 filename = "Evaluation-%s-%s.xls" % (semester.name, get_language()) 49 50 response = HttpResponse(mimetype="application/vnd.ms-excel") 51 response["Content-Disposition"] = "attachment; filename=\"%s\"" % filename 52 53 exporter = ExcelExporter(semester) 54 55 if 'all' in request.GET: 56 exporter.export(response, True) 57 else: 58 exporter.export(response) 59 60 return response 61 62 63 @login_required 64 def course_detail(request, semester_id, course_id): 65 semester = get_object_or_404(Semester, id=semester_id) 66 67 # staff can see preview of results even when course is not published 68 if request.user.is_staff: 69 course = get_object_or_404(semester.course_set.filter(state__in=['inEvaluation', 'evaluated', 'reviewed', 'published']), id=course_id) 70 # everyone else can only see published results 71 else: 72 course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id) 73 74 sections = calculate_results(course, request.user.is_staff) 75 76 if (request.user.is_staff == False): # if user is not a student representative 77 # remove TextResults if user is neither the evaluated person (or a delegate) nor responsible for the course (or a delegate) 78 for section in sections: 79 if not user_can_see_textresults(request.user, course, section): 80 for index, result in list(enumerate(section.results))[::-1]: 81 if isinstance(section.results[index], TextResult): 82 del section.results[index] 83 84 # remove empty sections 85 sections = [section for section in sections if section.results] 86 87 # show a warning if course is still in evaluation (for staff 
preview) 88 evaluation_warning = course.state != 'published' 89 90 # check whether course has a sufficient number of votes for publishing it 91 sufficient_votes = course.num_voters >= settings.MIN_ANSWER_COUNT and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE 92 93 # results for a course might not be visible because there are not enough answers 94 # but it can still be "published" e.g. to show the comment results to lecturers 95 # the FSR can still see all results but gets a warning message 96 sufficient_votes_warning = (not sufficient_votes) and request.user.is_staff 97 98 return render_to_response( 99 "results_course_detail.html", 100 dict( 101 course=course, 102 sections=sections, 103 evaluation_warning=evaluation_warning, 104 sufficient_votes_warning=sufficient_votes_warning 105 ), 106 context_instance=RequestContext(request)) 107 108 109 def user_can_see_textresults(user, course, section): 110 if section.contributor == user: 111 return True 112 if course.is_user_responsible_or_delegate(user): 113 return True 114 115 represented_userprofiles = user.represented_users.all() 116 represented_users = [profile.user for profile in represented_userprofiles] 117 if section.contributor in represented_users: 118 return True 119 120 return False 121 [end of evap/results/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/results/views.py b/evap/results/views.py --- a/evap/results/views.py +++ b/evap/results/views.py @@ -95,6 +95,9 @@ # the FSR can still see all results but gets a warning message sufficient_votes_warning = (not sufficient_votes) and request.user.is_staff + course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course) + course.can_publish_grades = can_publish_grades(course, request.user.is_staff) + return render_to_response( "results_course_detail.html", dict(
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -95,6 +95,9 @@\n # the FSR can still see all results but gets a warning message\n sufficient_votes_warning = (not sufficient_votes) and request.user.is_staff\n \n+ course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course)\n+ course.can_publish_grades = can_publish_grades(course, request.user.is_staff)\n+\n return render_to_response(\n \"results_course_detail.html\",\n dict(\n", "issue": "Result detail pages need to show total grades and participation\nThe `results/course_detail` pages should again show the information that is already given on the overview page (average and medium grades and participation).\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import get_language\n\nfrom evap.evaluation.auth import login_required, fsr_required\nfrom evap.evaluation.models import Semester\nfrom evap.evaluation.tools import calculate_results, calculate_average_and_medium_grades, TextResult, can_publish_grades\n\nfrom evap.results.exporters import ExcelExporter\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render_to_response(\n \"results_index.html\",\n dict(semesters=semesters),\n context_instance=RequestContext(request))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n courses = list(semester.course_set.filter(state=\"published\"))\n\n # annotate each course object with its grades\n for course in courses:\n # first, make sure that there are no preexisting grade attributes\n course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course)\n course.can_publish_grades = can_publish_grades(course, request.user.is_staff)\n\n return render_to_response(\n \"results_semester_detail.html\",\n dict(\n semester=semester,\n courses=courses\n ),\n context_instance=RequestContext(request))\n\n\n@fsr_required\ndef semester_export(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n\n filename = \"Evaluation-%s-%s.xls\" % (semester.name, get_language())\n\n response = HttpResponse(mimetype=\"application/vnd.ms-excel\")\n response[\"Content-Disposition\"] = \"attachment; filename=\\\"%s\\\"\" % filename\n\n exporter = ExcelExporter(semester)\n\n if 'all' in request.GET:\n exporter.export(response, True)\n else:\n exporter.export(response)\n\n return response\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n \n # staff can see preview of results even when course is not published\n if request.user.is_staff:\n course = get_object_or_404(semester.course_set.filter(state__in=['inEvaluation', 'evaluated', 'reviewed', 'published']), id=course_id)\n # everyone else can only see published results\n else:\n course = get_object_or_404(semester.course_set.filter(state=\"published\"), id=course_id)\n\n sections = calculate_results(course, request.user.is_staff)\n\n if (request.user.is_staff == False): # if user is not a student representative\n # remove TextResults if user is neither the evaluated person (or a delegate) nor responsible for the course (or a delegate)\n for section in sections:\n if not 
user_can_see_textresults(request.user, course, section):\n for index, result in list(enumerate(section.results))[::-1]:\n if isinstance(section.results[index], TextResult):\n del section.results[index]\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # show a warning if course is still in evaluation (for staff preview)\n evaluation_warning = course.state != 'published'\n\n # check whether course has a sufficient number of votes for publishing it\n sufficient_votes = course.num_voters >= settings.MIN_ANSWER_COUNT and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n\n # results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to lecturers\n # the FSR can still see all results but gets a warning message\n sufficient_votes_warning = (not sufficient_votes) and request.user.is_staff\n\n return render_to_response(\n \"results_course_detail.html\",\n dict(\n course=course,\n sections=sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning\n ),\n context_instance=RequestContext(request))\n\n\ndef user_can_see_textresults(user, course, section):\n if section.contributor == user:\n return True\n if course.is_user_responsible_or_delegate(user):\n return True\n\n represented_userprofiles = user.represented_users.all()\n represented_users = [profile.user for profile in represented_userprofiles]\n if section.contributor in represented_users:\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
1,813
135
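The evap fix above is just two assignments dropped into `course_detail`, right after the sufficient-votes warning, so the detail template can show the same totals and participation as the overview page. A hypothetical free-standing helper makes the pattern explicit; the helper name is illustrative, while `calculate_average_and_medium_grades` and `can_publish_grades` are the real functions from the record.

```python
from evap.evaluation.tools import calculate_average_and_medium_grades, can_publish_grades


def annotate_course_with_totals(course, user_is_staff):
    """Attach the totals the semester overview already shows to a single course."""
    course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course)
    course.can_publish_grades = can_publish_grades(course, user_is_staff)
    return course
```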
gh_patches_debug_1765
rasdani/github-patches
git_diff
mne-tools__mne-bids-1193
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Q: Release? It would be good to cut a MNE-BIDS release before the MNE-Python 1.6 release because it looks like the `mne.io`->`mne._fiff` stuff isn't in the latest release (e.g., see [circleci main failure in MNE](https://app.circleci.com/pipelines/github/mne-tools/mne-python/21889/workflows/b613fb80-7a0e-4c45-b2cc-02b8f0d9e589/jobs/61509)). Any chance to cut one in the next couple of days? </issue> <code> [start of mne_bids/__init__.py] 1 """MNE software for easily interacting with BIDS compatible datasets.""" 2 3 __version__ = "0.14.dev0" 4 from mne_bids import commands 5 from mne_bids.report import make_report 6 from mne_bids.path import ( 7 BIDSPath, 8 get_datatypes, 9 get_entity_vals, 10 print_dir_tree, 11 get_entities_from_fname, 12 search_folder_for_text, 13 get_bids_path_from_fname, 14 find_matching_paths, 15 ) 16 from mne_bids.read import get_head_mri_trans, read_raw_bids 17 from mne_bids.utils import get_anonymization_daysback 18 from mne_bids.write import ( 19 make_dataset_description, 20 write_anat, 21 write_raw_bids, 22 mark_channels, 23 write_meg_calibration, 24 write_meg_crosstalk, 25 get_anat_landmarks, 26 anonymize_dataset, 27 ) 28 from mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks 29 from mne_bids.inspect import inspect_dataset 30 from mne_bids.dig import ( 31 template_to_head, 32 convert_montage_to_ras, 33 convert_montage_to_mri, 34 ) 35 [end of mne_bids/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mne_bids/__init__.py b/mne_bids/__init__.py --- a/mne_bids/__init__.py +++ b/mne_bids/__init__.py @@ -1,6 +1,6 @@ """MNE software for easily interacting with BIDS compatible datasets.""" -__version__ = "0.14.dev0" +__version__ = "0.14" from mne_bids import commands from mne_bids.report import make_report from mne_bids.path import (
{"golden_diff": "diff --git a/mne_bids/__init__.py b/mne_bids/__init__.py\n--- a/mne_bids/__init__.py\n+++ b/mne_bids/__init__.py\n@@ -1,6 +1,6 @@\n \"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n \n-__version__ = \"0.14.dev0\"\n+__version__ = \"0.14\"\n from mne_bids import commands\n from mne_bids.report import make_report\n from mne_bids.path import (\n", "issue": "Q: Release?\nIt would be good to cut a MNE-BIDS release before the MNE-Python 1.6 release because it looks like the `mne.io`->`mne._fiff` stuff isn't in the latest release (e.g., see [circleci main failure in MNE](https://app.circleci.com/pipelines/github/mne-tools/mne-python/21889/workflows/b613fb80-7a0e-4c45-b2cc-02b8f0d9e589/jobs/61509)). Any chance to cut one in the next couple of days?\n", "before_files": [{"content": "\"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n\n__version__ = \"0.14.dev0\"\nfrom mne_bids import commands\nfrom mne_bids.report import make_report\nfrom mne_bids.path import (\n BIDSPath,\n get_datatypes,\n get_entity_vals,\n print_dir_tree,\n get_entities_from_fname,\n search_folder_for_text,\n get_bids_path_from_fname,\n find_matching_paths,\n)\nfrom mne_bids.read import get_head_mri_trans, read_raw_bids\nfrom mne_bids.utils import get_anonymization_daysback\nfrom mne_bids.write import (\n make_dataset_description,\n write_anat,\n write_raw_bids,\n mark_channels,\n write_meg_calibration,\n write_meg_crosstalk,\n get_anat_landmarks,\n anonymize_dataset,\n)\nfrom mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks\nfrom mne_bids.inspect import inspect_dataset\nfrom mne_bids.dig import (\n template_to_head,\n convert_montage_to_ras,\n convert_montage_to_mri,\n)\n", "path": "mne_bids/__init__.py"}]}
994
119
gh_patches_debug_19875
rasdani/github-patches
git_diff
OpenNMT__OpenNMT-py-1143
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The comment in onmt.modules.average_attn may be wrong The comment in onmt.modules.average_attn.AverageAttention.forward says that it gets a input of size `[batch_size x input_len x model_dim]`, and outputs a gating_outputs and a average_outputs both with size `[batch_size x 1 x model_dim]`. However, in my test the two outputs are both with size [batch_size x input_len x model_dim]. ```python import torch import onmt.modules.average_attn as Att model = Att.AverageAttention(3) a = torch.arange(24).reshape(2,4,3).float() print("intput size:", a.size()) o1, o2 = model(a) print("o1 size:", o1.size()) print("o2.size:", o2.size()) ``` intput size: torch.Size([2, 4, 3]) o1 size: torch.Size([2, 4, 3]) o2 size: torch.Size([2, 4, 3]) </issue> <code> [start of onmt/modules/average_attn.py] 1 # -*- coding: utf-8 -*- 2 """ Average Attention module """ 3 4 import torch 5 import torch.nn as nn 6 7 from onmt.modules.position_ffn import PositionwiseFeedForward 8 9 10 class AverageAttention(nn.Module): 11 """ 12 Average Attention module from 13 "Accelerating Neural Transformer via an Average Attention Network" 14 :cite:`https://arxiv.org/abs/1805.00631`. 15 16 Args: 17 model_dim (int): the dimension of keys/values/queries, 18 must be divisible by head_count 19 dropout (float): dropout parameter 20 """ 21 22 def __init__(self, model_dim, dropout=0.1): 23 self.model_dim = model_dim 24 25 super(AverageAttention, self).__init__() 26 27 self.average_layer = PositionwiseFeedForward(model_dim, model_dim, 28 dropout) 29 self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2) 30 31 def cumulative_average_mask(self, batch_size, inputs_len): 32 """ 33 Builds the mask to compute the cumulative average as described in 34 https://arxiv.org/abs/1805.00631 -- Figure 3 35 36 Args: 37 batch_size (int): batch size 38 inputs_len (int): length of the inputs 39 40 Returns: 41 (`FloatTensor`): 42 43 * A Tensor of shape `[batch_size x input_len x input_len]` 44 """ 45 46 triangle = torch.tril(torch.ones(inputs_len, inputs_len)) 47 weights = torch.ones(1, inputs_len) / torch.arange( 48 1, inputs_len + 1, dtype=torch.float) 49 mask = triangle * weights.transpose(0, 1) 50 51 return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) 52 53 def cumulative_average(self, inputs, mask_or_step, 54 layer_cache=None, step=None): 55 """ 56 Computes the cumulative average as described in 57 https://arxiv.org/abs/1805.00631 -- Equations (1) (5) (6) 58 59 Args: 60 inputs (`FloatTensor`): sequence to average 61 `[batch_size x input_len x dimension]` 62 mask_or_step: if cache is set, this is assumed 63 to be the current step of the 64 dynamic decoding. Otherwise, it is the mask matrix 65 used to compute the cumulative average. 66 cache: a dictionary containing the cumulative average 67 of the previous step. 
68 """ 69 if layer_cache is not None: 70 step = mask_or_step 71 device = inputs.device 72 average_attention = (inputs + step * 73 layer_cache["prev_g"].to(device)) / (step + 1) 74 layer_cache["prev_g"] = average_attention 75 return average_attention 76 else: 77 mask = mask_or_step 78 return torch.matmul(mask, inputs) 79 80 def forward(self, inputs, mask=None, layer_cache=None, step=None): 81 """ 82 Args: 83 inputs (`FloatTensor`): `[batch_size x input_len x model_dim]` 84 85 Returns: 86 (`FloatTensor`, `FloatTensor`): 87 88 * gating_outputs `[batch_size x 1 x model_dim]` 89 * average_outputs average attention `[batch_size x 1 x model_dim]` 90 """ 91 batch_size = inputs.size(0) 92 inputs_len = inputs.size(1) 93 94 device = inputs.device 95 average_outputs = self.cumulative_average( 96 inputs, self.cumulative_average_mask(batch_size, 97 inputs_len).to(device).float() 98 if layer_cache is None else step, layer_cache=layer_cache) 99 average_outputs = self.average_layer(average_outputs) 100 gating_outputs = self.gating_layer(torch.cat((inputs, 101 average_outputs), -1)) 102 input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2) 103 gating_outputs = torch.sigmoid(input_gate) * inputs + \ 104 torch.sigmoid(forget_gate) * average_outputs 105 106 return gating_outputs, average_outputs 107 [end of onmt/modules/average_attn.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/onmt/modules/average_attn.py b/onmt/modules/average_attn.py --- a/onmt/modules/average_attn.py +++ b/onmt/modules/average_attn.py @@ -65,6 +65,9 @@ used to compute the cumulative average. cache: a dictionary containing the cumulative average of the previous step. + + Returns: + a tensor of the same shape and type as :obj:`inputs`. """ if layer_cache is not None: step = mask_or_step @@ -85,8 +88,9 @@ Returns: (`FloatTensor`, `FloatTensor`): - * gating_outputs `[batch_size x 1 x model_dim]` - * average_outputs average attention `[batch_size x 1 x model_dim]` + * gating_outputs `[batch_size x input_len x model_dim]` + * average_outputs average attention + `[batch_size x input_len x model_dim]` """ batch_size = inputs.size(0) inputs_len = inputs.size(1)
{"golden_diff": "diff --git a/onmt/modules/average_attn.py b/onmt/modules/average_attn.py\n--- a/onmt/modules/average_attn.py\n+++ b/onmt/modules/average_attn.py\n@@ -65,6 +65,9 @@\n used to compute the cumulative average.\n cache: a dictionary containing the cumulative average\n of the previous step.\n+\n+ Returns:\n+ a tensor of the same shape and type as :obj:`inputs`.\n \"\"\"\n if layer_cache is not None:\n step = mask_or_step\n@@ -85,8 +88,9 @@\n Returns:\n (`FloatTensor`, `FloatTensor`):\n \n- * gating_outputs `[batch_size x 1 x model_dim]`\n- * average_outputs average attention `[batch_size x 1 x model_dim]`\n+ * gating_outputs `[batch_size x input_len x model_dim]`\n+ * average_outputs average attention\n+ `[batch_size x input_len x model_dim]`\n \"\"\"\n batch_size = inputs.size(0)\n inputs_len = inputs.size(1)\n", "issue": "The comment in onmt.modules.average_attn may be wrong\nThe comment in onmt.modules.average_attn.AverageAttention.forward says that it gets a input of size `[batch_size x input_len x model_dim]`, and outputs a gating_outputs and a average_outputs both with size `[batch_size x 1 x model_dim]`. However, in my test the two outputs are both with size [batch_size x input_len x model_dim].\r\n```python\r\nimport torch\r\nimport onmt.modules.average_attn as Att\r\nmodel = Att.AverageAttention(3)\r\na = torch.arange(24).reshape(2,4,3).float()\r\nprint(\"intput size:\", a.size())\r\no1, o2 = model(a)\r\nprint(\"o1 size:\", o1.size())\r\nprint(\"o2.size:\", o2.size())\r\n```\r\nintput size: torch.Size([2, 4, 3])\r\no1 size: torch.Size([2, 4, 3])\r\no2 size: torch.Size([2, 4, 3])\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\" Average Attention module \"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom onmt.modules.position_ffn import PositionwiseFeedForward\n\n\nclass AverageAttention(nn.Module):\n \"\"\"\n Average Attention module from\n \"Accelerating Neural Transformer via an Average Attention Network\"\n :cite:`https://arxiv.org/abs/1805.00631`.\n\n Args:\n model_dim (int): the dimension of keys/values/queries,\n must be divisible by head_count\n dropout (float): dropout parameter\n \"\"\"\n\n def __init__(self, model_dim, dropout=0.1):\n self.model_dim = model_dim\n\n super(AverageAttention, self).__init__()\n\n self.average_layer = PositionwiseFeedForward(model_dim, model_dim,\n dropout)\n self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2)\n\n def cumulative_average_mask(self, batch_size, inputs_len):\n \"\"\"\n Builds the mask to compute the cumulative average as described in\n https://arxiv.org/abs/1805.00631 -- Figure 3\n\n Args:\n batch_size (int): batch size\n inputs_len (int): length of the inputs\n\n Returns:\n (`FloatTensor`):\n\n * A Tensor of shape `[batch_size x input_len x input_len]`\n \"\"\"\n\n triangle = torch.tril(torch.ones(inputs_len, inputs_len))\n weights = torch.ones(1, inputs_len) / torch.arange(\n 1, inputs_len + 1, dtype=torch.float)\n mask = triangle * weights.transpose(0, 1)\n\n return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)\n\n def cumulative_average(self, inputs, mask_or_step,\n layer_cache=None, step=None):\n \"\"\"\n Computes the cumulative average as described in\n https://arxiv.org/abs/1805.00631 -- Equations (1) (5) (6)\n\n Args:\n inputs (`FloatTensor`): sequence to average\n `[batch_size x input_len x dimension]`\n mask_or_step: if cache is set, this is assumed\n to be the current step of the\n dynamic decoding. 
Otherwise, it is the mask matrix\n used to compute the cumulative average.\n cache: a dictionary containing the cumulative average\n of the previous step.\n \"\"\"\n if layer_cache is not None:\n step = mask_or_step\n device = inputs.device\n average_attention = (inputs + step *\n layer_cache[\"prev_g\"].to(device)) / (step + 1)\n layer_cache[\"prev_g\"] = average_attention\n return average_attention\n else:\n mask = mask_or_step\n return torch.matmul(mask, inputs)\n\n def forward(self, inputs, mask=None, layer_cache=None, step=None):\n \"\"\"\n Args:\n inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * gating_outputs `[batch_size x 1 x model_dim]`\n * average_outputs average attention `[batch_size x 1 x model_dim]`\n \"\"\"\n batch_size = inputs.size(0)\n inputs_len = inputs.size(1)\n\n device = inputs.device\n average_outputs = self.cumulative_average(\n inputs, self.cumulative_average_mask(batch_size,\n inputs_len).to(device).float()\n if layer_cache is None else step, layer_cache=layer_cache)\n average_outputs = self.average_layer(average_outputs)\n gating_outputs = self.gating_layer(torch.cat((inputs,\n average_outputs), -1))\n input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)\n gating_outputs = torch.sigmoid(input_gate) * inputs + \\\n torch.sigmoid(forget_gate) * average_outputs\n\n return gating_outputs, average_outputs\n", "path": "onmt/modules/average_attn.py"}]}
1,829
236
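A self-contained check of the shape question raised in the issue above, using only the cumulative-average mask math from the record (plain PyTorch, no OpenNMT install assumed). Row *i* of the mask averages positions 0..*i*, so the batched matmul keeps the full `input_len` dimension, which is why both outputs come back as `[batch_size x input_len x model_dim]` rather than `[batch_size x 1 x model_dim]`.

```python
import torch


def cumulative_average_mask(batch_size, inputs_len):
    # Lower-triangular ones, row i scaled by 1/(i+1): row i averages positions 0..i.
    triangle = torch.tril(torch.ones(inputs_len, inputs_len))
    weights = torch.ones(1, inputs_len) / torch.arange(1, inputs_len + 1, dtype=torch.float)
    return (triangle * weights.transpose(0, 1)).unsqueeze(0).expand(
        batch_size, inputs_len, inputs_len)


inputs = torch.arange(24).reshape(2, 4, 3).float()   # [batch_size=2, input_len=4, model_dim=3]
mask = cumulative_average_mask(2, 4)
average_outputs = torch.matmul(mask, inputs)          # cumulative average at every position
print(average_outputs.shape)   # torch.Size([2, 4, 3]): input_len preserved, not collapsed to 1
```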
gh_patches_debug_25131
rasdani/github-patches
git_diff
lightly-ai__lightly-213
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix imports in __init__ and add tests # Fix import errors Closes #213 I fixed the import errors, e.g. ``` import lightly lightly.data.LightlyDataset ``` should work now. I also added tests for all import that I found important. Bumped the version number so we can quickly release this hotfix. Added SimSiam to the list of models in the description. </issue> <code> [start of lightly/__init__.py] 1 """Lightly is a computer vision framework for self-supervised learning. 2 3 With Lightly you can train deep learning models using 4 self-supervision. This means, that you don't require 5 any labels to train a model. Lightly has been built 6 to help you understand and work with large unlabeled datasets. 7 It is built on top of PyTorch and therefore fully compatible 8 with other frameworks such as Fast.ai. 9 10 The framework is structured into the following modules: 11 12 - **api**: 13 14 The lightly.api module handles communication with the Lightly web-app. 15 16 - **cli**: 17 18 The lightly.cli module provides a command-line interface for training 19 self-supervised models and embedding images. Furthermore, the command-line 20 tool can be used to upload and download images from/to the Lightly web-app. 21 22 - **core**: 23 24 The lightly.core module offers one-liners for simple self-supervised learning. 25 26 - **data**: 27 28 The lightly.data module provides a dataset wrapper and collate functions. The 29 collate functions are in charge of the data augmentations which are crucial for 30 self-supervised learning. 31 32 - **embedding**: 33 34 The lightly.embedding module combines the self-supervised models with a dataloader, 35 optimizer, and loss function to provide a simple pytorch-lightning trainable. 36 37 - **loss**: 38 39 The lightly.loss module contains implementations of popular self-supervised training 40 loss functions. 41 42 - **models**: 43 44 The lightly.models module holds the implementation of the ResNet as well as self- 45 supervised methods. Currently implements: 46 47 - SimCLR 48 49 - MoCo 50 51 - **transforms**: 52 53 The lightly.transforms module implements custom data transforms. Currently implements: 54 55 - Gaussian Blur 56 57 - Random Rotation 58 59 - **utils**: 60 61 The lightly.utils package provides global utility methods. 62 The io module contains utility to save and load embeddings in a format which is 63 understood by the Lightly library. 64 65 """ 66 67 # Copyright (c) 2020. Lightly AG and its affiliates. 68 # All Rights Reserved 69 70 __name__ = 'lightly' 71 __version__ = '1.1.0' 72 73 74 try: 75 # See (https://github.com/PyTorchLightning/pytorch-lightning) 76 # This variable is injected in the __builtins__ by the build 77 # process. It used to enable importing subpackages of skimage when 78 # the binaries are not built 79 __LIGHTLY_SETUP__ 80 except NameError: 81 __LIGHTLY_SETUP__ = False 82 83 84 if __LIGHTLY_SETUP__: 85 # setting up lightly 86 msg = f'Partial import of {__name__}=={__version__} during build process.' 
87 print(msg) 88 else: 89 # see if prefetch_generator is available 90 try: 91 import prefetch_generator 92 except ImportError: 93 _prefetch_generator_available = False 94 else: 95 _prefetch_generator_available = True 96 97 def _is_prefetch_generator_available(): 98 return _prefetch_generator_available 99 100 # check for latest version 101 from lightly.api.version_checking import get_latest_version, version_compare, pretty_print_latest_version 102 103 latest_version = get_latest_version(__version__) 104 if latest_version is not None: 105 if version_compare(__version__, latest_version) < 0: 106 # local version is behind latest version 107 pretty_print_latest_version(latest_version) 108 [end of lightly/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lightly/__init__.py b/lightly/__init__.py --- a/lightly/__init__.py +++ b/lightly/__init__.py @@ -48,6 +48,8 @@ - MoCo + - SimSiam + - **transforms**: The lightly.transforms module implements custom data transforms. Currently implements: @@ -68,7 +70,7 @@ # All Rights Reserved __name__ = 'lightly' -__version__ = '1.1.0' +__version__ = '1.1.1' try: @@ -97,8 +99,22 @@ def _is_prefetch_generator_available(): return _prefetch_generator_available + from lightly.core import * + from lightly import active_learning + from lightly import api + from lightly import data + from lightly import embedding + from lightly import loss + from lightly import models + from lightly import openapi_generated + from lightly import transforms + from lightly import utils + + # check for latest version - from lightly.api.version_checking import get_latest_version, version_compare, pretty_print_latest_version + from lightly.api.version_checking import get_latest_version + from lightly.api.version_checking import version_compare + from lightly.api.version_checking import pretty_print_latest_version latest_version = get_latest_version(__version__) if latest_version is not None:
{"golden_diff": "diff --git a/lightly/__init__.py b/lightly/__init__.py\n--- a/lightly/__init__.py\n+++ b/lightly/__init__.py\n@@ -48,6 +48,8 @@\n \n - MoCo\n \n+ - SimSiam\n+\n - **transforms**:\n \n The lightly.transforms module implements custom data transforms. Currently implements:\n@@ -68,7 +70,7 @@\n # All Rights Reserved\n \n __name__ = 'lightly'\n-__version__ = '1.1.0'\n+__version__ = '1.1.1'\n \n \n try:\n@@ -97,8 +99,22 @@\n def _is_prefetch_generator_available():\n return _prefetch_generator_available\n \n+ from lightly.core import *\n+ from lightly import active_learning\n+ from lightly import api\n+ from lightly import data\n+ from lightly import embedding\n+ from lightly import loss\n+ from lightly import models\n+ from lightly import openapi_generated\n+ from lightly import transforms\n+ from lightly import utils\n+\n+\n # check for latest version\n- from lightly.api.version_checking import get_latest_version, version_compare, pretty_print_latest_version\n+ from lightly.api.version_checking import get_latest_version\n+ from lightly.api.version_checking import version_compare\n+ from lightly.api.version_checking import pretty_print_latest_version\n \n latest_version = get_latest_version(__version__)\n if latest_version is not None:\n", "issue": "Fix imports in __init__ and add tests\n# Fix import errors\r\n\r\nCloses #213 \r\n\r\nI fixed the import errors, e.g. \r\n```\r\nimport lightly\r\nlightly.data.LightlyDataset\r\n```\r\nshould work now. I also added tests for all import that I found important.\r\n\r\nBumped the version number so we can quickly release this hotfix.\r\n\r\nAdded SimSiam to the list of models in the description.\n", "before_files": [{"content": "\"\"\"Lightly is a computer vision framework for self-supervised learning.\n\nWith Lightly you can train deep learning models using\nself-supervision. This means, that you don't require\nany labels to train a model. Lightly has been built\nto help you understand and work with large unlabeled datasets.\nIt is built on top of PyTorch and therefore fully compatible \nwith other frameworks such as Fast.ai.\n\nThe framework is structured into the following modules:\n\n- **api**: \n\n The lightly.api module handles communication with the Lightly web-app.\n\n- **cli**:\n\n The lightly.cli module provides a command-line interface for training \n self-supervised models and embedding images. Furthermore, the command-line\n tool can be used to upload and download images from/to the Lightly web-app.\n\n- **core**:\n\n The lightly.core module offers one-liners for simple self-supervised learning.\n\n- **data**:\n\n The lightly.data module provides a dataset wrapper and collate functions. The\n collate functions are in charge of the data augmentations which are crucial for\n self-supervised learning.\n\n- **embedding**:\n\n The lightly.embedding module combines the self-supervised models with a dataloader,\n optimizer, and loss function to provide a simple pytorch-lightning trainable.\n\n- **loss**:\n\n The lightly.loss module contains implementations of popular self-supervised training\n loss functions.\n\n- **models**:\n\n The lightly.models module holds the implementation of the ResNet as well as self-\n supervised methods. Currently implements:\n\n - SimCLR\n\n - MoCo\n\n- **transforms**:\n\n The lightly.transforms module implements custom data transforms. 
Currently implements:\n\n - Gaussian Blur\n\n - Random Rotation\n\n- **utils**:\n\n The lightly.utils package provides global utility methods.\n The io module contains utility to save and load embeddings in a format which is\n understood by the Lightly library.\n\n\"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\n__name__ = 'lightly'\n__version__ = '1.1.0'\n\n\ntry:\n # See (https://github.com/PyTorchLightning/pytorch-lightning)\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __LIGHTLY_SETUP__\nexcept NameError:\n __LIGHTLY_SETUP__ = False\n\n\nif __LIGHTLY_SETUP__:\n #\u00a0setting up lightly\n msg = f'Partial import of {__name__}=={__version__} during build process.' \n print(msg)\nelse:\n #\u00a0see if prefetch_generator is available\n try:\n import prefetch_generator\n except ImportError:\n _prefetch_generator_available = False\n else:\n _prefetch_generator_available = True\n\n def _is_prefetch_generator_available():\n return _prefetch_generator_available\n\n # check for latest version\n from lightly.api.version_checking import get_latest_version, version_compare, pretty_print_latest_version\n\n latest_version = get_latest_version(__version__)\n if latest_version is not None:\n if version_compare(__version__, latest_version) < 0:\n # local version is behind latest version\n pretty_print_latest_version(latest_version)\n", "path": "lightly/__init__.py"}]}
1,574
323
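A condensed sketch of the two things the lightly diff above does inside `lightly/__init__.py`: guard the optional `prefetch_generator` dependency, and import the subpackages so attribute access works right after `import lightly`. Subpackage names are taken from the diff; this is an abridged shape of the file, not its full contents.

```python
# shape of lightly/__init__.py after the fix (abridged sketch)
try:
    import prefetch_generator  # optional dependency
    _prefetch_generator_available = True
except ImportError:
    _prefetch_generator_available = False


def _is_prefetch_generator_available():
    return _prefetch_generator_available


# re-exporting the subpackages is what makes `lightly.data`, `lightly.models`,
# `lightly.transforms`, ... resolvable without extra imports
from lightly import data        # noqa: E402,F401
from lightly import models      # noqa: E402,F401
from lightly import transforms  # noqa: E402,F401
from lightly import utils       # noqa: E402,F401
```

With that in place, the snippet from the issue works as written:

```python
import lightly

lightly.data.LightlyDataset  # resolves instead of raising AttributeError
```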
gh_patches_debug_304
rasdani/github-patches
git_diff
statsmodels__statsmodels-507
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> numdifftools dependency Original Launchpad bug 653902: https://bugs.launchpad.net/statsmodels/+bug/653902 Reported by: vincent-vincentdavis (Vincent Davis). statsmodels/_init_.py imports tsa Which then returns an exception from statsmodels/tsa/var.py "raise Warning("You need to install numdifftools to try out the AR model")" Should numdifftools be a dependency for all of statsmodels ? </issue> <code> [start of statsmodels/tsa/mlemodel.py] 1 """Base Classes for Likelihood Models in time series analysis 2 3 Warning: imports numdifftools 4 5 6 7 Created on Sun Oct 10 15:00:47 2010 8 9 Author: josef-pktd 10 License: BSD 11 12 """ 13 14 import numpy as np 15 16 import numdifftools as ndt 17 18 from statsmodels.base.model import LikelihoodModel 19 20 #copied from sandbox/regression/mle.py 21 #TODO: I take it this is only a stub and should be included in another 22 # model class? 23 class TSMLEModel(LikelihoodModel): 24 """ 25 univariate time series model for estimation with maximum likelihood 26 27 Note: This is not working yet 28 """ 29 30 def __init__(self, endog, exog=None): 31 #need to override p,q (nar,nma) correctly 32 super(TSMLEModel, self).__init__(endog, exog) 33 #set default arma(1,1) 34 self.nar = 1 35 self.nma = 1 36 #self.initialize() 37 38 def geterrors(self, params): 39 raise NotImplementedError 40 41 def loglike(self, params): 42 """ 43 Loglikelihood for timeseries model 44 45 Notes 46 ----- 47 needs to be overwritten by subclass 48 """ 49 raise NotImplementedError 50 51 52 def score(self, params): 53 """ 54 Score vector for Arma model 55 """ 56 #return None 57 #print params 58 jac = ndt.Jacobian(self.loglike, stepMax=1e-4) 59 return jac(params)[-1] 60 61 def hessian(self, params): 62 """ 63 Hessian of arma model. Currently uses numdifftools 64 """ 65 #return None 66 Hfun = ndt.Jacobian(self.score, stepMax=1e-4) 67 return Hfun(params)[-1] 68 69 70 def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08): 71 '''estimate model by minimizing negative loglikelihood 72 73 does this need to be overwritten ? 74 ''' 75 if start_params is None and hasattr(self, '_start_params'): 76 start_params = self._start_params 77 #start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1])) 78 mlefit = super(TSMLEModel, self).fit(start_params=start_params, 79 maxiter=maxiter, method=method, tol=tol) 80 return mlefit 81 [end of statsmodels/tsa/mlemodel.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/statsmodels/tsa/mlemodel.py b/statsmodels/tsa/mlemodel.py --- a/statsmodels/tsa/mlemodel.py +++ b/statsmodels/tsa/mlemodel.py @@ -13,7 +13,10 @@ import numpy as np -import numdifftools as ndt +try: + import numdifftools as ndt +except: + pass from statsmodels.base.model import LikelihoodModel
{"golden_diff": "diff --git a/statsmodels/tsa/mlemodel.py b/statsmodels/tsa/mlemodel.py\n--- a/statsmodels/tsa/mlemodel.py\n+++ b/statsmodels/tsa/mlemodel.py\n@@ -13,7 +13,10 @@\n \n import numpy as np\n \n-import numdifftools as ndt\n+try:\n+ import numdifftools as ndt\n+except:\n+ pass\n \n from statsmodels.base.model import LikelihoodModel\n", "issue": "numdifftools dependency\nOriginal Launchpad bug 653902: https://bugs.launchpad.net/statsmodels/+bug/653902\nReported by: vincent-vincentdavis (Vincent Davis).\n\nstatsmodels/_init_.py imports tsa\nWhich then returns an exception from statsmodels/tsa/var.py \"raise Warning(\"You need to install numdifftools to try out the AR model\")\"\nShould numdifftools be a dependency for all of statsmodels ?\n\n", "before_files": [{"content": "\"\"\"Base Classes for Likelihood Models in time series analysis\n\nWarning: imports numdifftools\n\n\n\nCreated on Sun Oct 10 15:00:47 2010\n\nAuthor: josef-pktd\nLicense: BSD\n\n\"\"\"\n\nimport numpy as np\n\nimport numdifftools as ndt\n\nfrom statsmodels.base.model import LikelihoodModel\n\n#copied from sandbox/regression/mle.py\n#TODO: I take it this is only a stub and should be included in another\n# model class?\nclass TSMLEModel(LikelihoodModel):\n \"\"\"\n univariate time series model for estimation with maximum likelihood\n\n Note: This is not working yet\n \"\"\"\n\n def __init__(self, endog, exog=None):\n #need to override p,q (nar,nma) correctly\n super(TSMLEModel, self).__init__(endog, exog)\n #set default arma(1,1)\n self.nar = 1\n self.nma = 1\n #self.initialize()\n\n def geterrors(self, params):\n raise NotImplementedError\n\n def loglike(self, params):\n \"\"\"\n Loglikelihood for timeseries model\n\n Notes\n -----\n needs to be overwritten by subclass\n \"\"\"\n raise NotImplementedError\n\n\n def score(self, params):\n \"\"\"\n Score vector for Arma model\n \"\"\"\n #return None\n #print params\n jac = ndt.Jacobian(self.loglike, stepMax=1e-4)\n return jac(params)[-1]\n\n def hessian(self, params):\n \"\"\"\n Hessian of arma model. Currently uses numdifftools\n \"\"\"\n #return None\n Hfun = ndt.Jacobian(self.score, stepMax=1e-4)\n return Hfun(params)[-1]\n\n\n def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):\n '''estimate model by minimizing negative loglikelihood\n\n does this need to be overwritten ?\n '''\n if start_params is None and hasattr(self, '_start_params'):\n start_params = self._start_params\n #start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))\n mlefit = super(TSMLEModel, self).fit(start_params=start_params,\n maxiter=maxiter, method=method, tol=tol)\n return mlefit\n", "path": "statsmodels/tsa/mlemodel.py"}]}
1,348
104
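The statsmodels fix above is the classic optional-import guard. A slightly fuller sketch of the same idea defers the failure to the first call that actually needs `numdifftools`; the `Jacobian(..., stepMax=...)` call is copied from the record, the function name is illustrative, and `ImportError` is used here where the golden diff keeps a bare `except`.

```python
try:
    import numdifftools as ndt
except ImportError:
    ndt = None


def numerical_score(loglike, params):
    """Score vector via numerical differentiation, as the record's TSMLEModel does."""
    if ndt is None:
        raise ImportError("numdifftools is required for numerical derivatives")
    jac = ndt.Jacobian(loglike, stepMax=1e-4)
    return jac(params)[-1]
```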
gh_patches_debug_5341
rasdani/github-patches
git_diff
statsmodels__statsmodels-6290
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> is sm.open_help ever defined? Near the end of `statsmodels.api`: ``` chmpath = os.path.join(os.path.dirname(__file__), 'statsmodelsdoc.chm') if os.path.exists(chmpath): def open_help(chmpath=chmpath): from subprocess import Popen p = Popen(chmpath, shell=True) ``` I'm not familiar with "statsmodelsdoc.chm". Under what conditions does that exist? </issue> <code> [start of statsmodels/api.py] 1 # -*- coding: utf-8 -*- 2 # flake8: noqa 3 import os 4 5 from . import iolib 6 from . import datasets 7 from . import tools 8 from .tools.tools import add_constant, categorical 9 from . import regression 10 from .regression.linear_model import OLS, GLS, WLS, GLSAR 11 from .regression.recursive_ls import RecursiveLS 12 from .regression.quantile_regression import QuantReg 13 from .regression.mixed_linear_model import MixedLM 14 from .genmod import api as genmod 15 from .genmod.api import (GLM, GEE, OrdinalGEE, NominalGEE, families, 16 cov_struct, 17 BinomialBayesMixedGLM, PoissonBayesMixedGLM) 18 from . import robust 19 from .robust.robust_linear_model import RLM 20 from .discrete.discrete_model import (Poisson, Logit, Probit, 21 MNLogit, NegativeBinomial, 22 GeneralizedPoisson, 23 NegativeBinomialP) 24 from .discrete.count_model import (ZeroInflatedPoisson, 25 ZeroInflatedGeneralizedPoisson, 26 ZeroInflatedNegativeBinomialP) 27 from .tsa import api as tsa 28 from .duration.survfunc import SurvfuncRight 29 from .duration.hazard_regression import PHReg 30 from .imputation.mice import MICE, MICEData 31 from .imputation.bayes_mi import BayesGaussMI, MI 32 from .nonparametric import api as nonparametric 33 from . import distributions 34 from .__init__ import test 35 36 from .gam.generalized_additive_model import GLMGam 37 from .gam import api as gam 38 from .graphics.gofplots import qqplot, qqplot_2samples, qqline, ProbPlot 39 from .graphics import api as graphics 40 from .stats import api as stats 41 from .emplike import api as emplike 42 from .duration import api as duration 43 from .multivariate.pca import PCA 44 from .multivariate.manova import MANOVA 45 from .multivariate.factor import Factor 46 from .multivariate import api as multivariate 47 48 from .formula import api as formula 49 50 from .iolib.smpickle import load_pickle 51 52 from .tools.print_version import show_versions 53 from .tools.web import webdoc 54 55 load = load_pickle 56 57 chmpath = os.path.join(os.path.dirname(__file__), 'statsmodelsdoc.chm') 58 if os.path.exists(chmpath): 59 # As of 0.10.0, this is not reached. See GH#5134 60 61 def open_help(chmpath=chmpath): 62 from subprocess import Popen 63 64 p = Popen(chmpath, shell=True) 65 66 del os 67 del chmpath 68 69 from ._version import get_versions 70 __version__ = get_versions()['version'] 71 del get_versions 72 [end of statsmodels/api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/statsmodels/api.py b/statsmodels/api.py --- a/statsmodels/api.py +++ b/statsmodels/api.py @@ -54,18 +54,6 @@ load = load_pickle -chmpath = os.path.join(os.path.dirname(__file__), 'statsmodelsdoc.chm') -if os.path.exists(chmpath): - # As of 0.10.0, this is not reached. See GH#5134 - - def open_help(chmpath=chmpath): - from subprocess import Popen - - p = Popen(chmpath, shell=True) - -del os -del chmpath - from ._version import get_versions __version__ = get_versions()['version'] del get_versions
{"golden_diff": "diff --git a/statsmodels/api.py b/statsmodels/api.py\n--- a/statsmodels/api.py\n+++ b/statsmodels/api.py\n@@ -54,18 +54,6 @@\n \n load = load_pickle\n \n-chmpath = os.path.join(os.path.dirname(__file__), 'statsmodelsdoc.chm')\n-if os.path.exists(chmpath):\n- # As of 0.10.0, this is not reached. See GH#5134\n-\n- def open_help(chmpath=chmpath):\n- from subprocess import Popen\n-\n- p = Popen(chmpath, shell=True)\n-\n-del os\n-del chmpath\n-\n from ._version import get_versions\n __version__ = get_versions()['version']\n del get_versions\n", "issue": "is sm.open_help ever defined?\nNear the end of `statsmodels.api`:\r\n\r\n```\r\nchmpath = os.path.join(os.path.dirname(__file__), 'statsmodelsdoc.chm')\r\nif os.path.exists(chmpath):\r\n def open_help(chmpath=chmpath):\r\n from subprocess import Popen\r\n\r\n p = Popen(chmpath, shell=True)\r\n```\r\n\r\nI'm not familiar with \"statsmodelsdoc.chm\". Under what conditions does that exist?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# flake8: noqa\nimport os\n\nfrom . import iolib\nfrom . import datasets\nfrom . import tools\nfrom .tools.tools import add_constant, categorical\nfrom . import regression\nfrom .regression.linear_model import OLS, GLS, WLS, GLSAR\nfrom .regression.recursive_ls import RecursiveLS\nfrom .regression.quantile_regression import QuantReg\nfrom .regression.mixed_linear_model import MixedLM\nfrom .genmod import api as genmod\nfrom .genmod.api import (GLM, GEE, OrdinalGEE, NominalGEE, families,\n cov_struct,\n BinomialBayesMixedGLM, PoissonBayesMixedGLM)\nfrom . import robust\nfrom .robust.robust_linear_model import RLM\nfrom .discrete.discrete_model import (Poisson, Logit, Probit,\n MNLogit, NegativeBinomial,\n GeneralizedPoisson,\n NegativeBinomialP)\nfrom .discrete.count_model import (ZeroInflatedPoisson,\n ZeroInflatedGeneralizedPoisson,\n ZeroInflatedNegativeBinomialP)\nfrom .tsa import api as tsa\nfrom .duration.survfunc import SurvfuncRight\nfrom .duration.hazard_regression import PHReg\nfrom .imputation.mice import MICE, MICEData\nfrom .imputation.bayes_mi import BayesGaussMI, MI\nfrom .nonparametric import api as nonparametric\nfrom . import distributions\nfrom .__init__ import test\n\nfrom .gam.generalized_additive_model import GLMGam\nfrom .gam import api as gam\nfrom .graphics.gofplots import qqplot, qqplot_2samples, qqline, ProbPlot\nfrom .graphics import api as graphics\nfrom .stats import api as stats\nfrom .emplike import api as emplike\nfrom .duration import api as duration\nfrom .multivariate.pca import PCA\nfrom .multivariate.manova import MANOVA\nfrom .multivariate.factor import Factor\nfrom .multivariate import api as multivariate\n\nfrom .formula import api as formula\n\nfrom .iolib.smpickle import load_pickle\n\nfrom .tools.print_version import show_versions\nfrom .tools.web import webdoc\n\nload = load_pickle\n\nchmpath = os.path.join(os.path.dirname(__file__), 'statsmodelsdoc.chm')\nif os.path.exists(chmpath):\n # As of 0.10.0, this is not reached. See GH#5134\n\n def open_help(chmpath=chmpath):\n from subprocess import Popen\n\n p = Popen(chmpath, shell=True)\n\ndel os\ndel chmpath\n\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n", "path": "statsmodels/api.py"}]}
1,392
169
gh_patches_debug_49673
rasdani/github-patches
git_diff
kserve__kserve-2835
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No matches for kind \"HorizontalPodAutoscaler\" in version \"autoscaling/v2beta2\ /kind bug **What steps did you take and what happened:** Deploy kserve in raw mode on kubernetes 1.26 where autoscaling/v2beta2 is no longer available **What did you expect to happen:** Kserve should support v2 of the api </issue> <code> [start of hack/python-sdk/update_release_version_helper.py] 1 #!/usr/bin/env python3 2 3 # Copyright 2023 The KServe Authors. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 import tomlkit 18 import argparse 19 20 parser = argparse.ArgumentParser(description="Update release version in python toml files") 21 parser.add_argument("version", type=str, help="release version") 22 args, _ = parser.parse_known_args() 23 24 toml_files = [ 25 "python/kserve/pyproject.toml", 26 "python/aiffairness/pyproject.toml", 27 "python/aixexplainer/pyproject.toml", 28 "python/alibiexplainer/pyproject.toml", 29 "python/artexplainer/pyproject.toml", 30 "python/custom_model/pyproject.toml", 31 "python/custom_transformer/pyproject.toml", 32 "python/lgbserver/pyproject.toml", 33 "python/paddleserver/pyproject.toml", 34 "python/pmmlserver/pyproject.toml", 35 "python/sklearnserver/pyproject.toml", 36 "python/xgbserver/pyproject.toml", 37 ] 38 39 for toml_file in toml_files: 40 with open(toml_file, "r") as file: 41 toml_config = tomlkit.load(file) 42 toml_config['tool']['poetry']['version'] = args.version 43 44 with open(toml_file, "w") as file: 45 tomlkit.dump(toml_config, file) 46 [end of hack/python-sdk/update_release_version_helper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hack/python-sdk/update_release_version_helper.py b/hack/python-sdk/update_release_version_helper.py --- a/hack/python-sdk/update_release_version_helper.py +++ b/hack/python-sdk/update_release_version_helper.py @@ -24,7 +24,6 @@ toml_files = [ "python/kserve/pyproject.toml", "python/aiffairness/pyproject.toml", - "python/aixexplainer/pyproject.toml", "python/alibiexplainer/pyproject.toml", "python/artexplainer/pyproject.toml", "python/custom_model/pyproject.toml",
{"golden_diff": "diff --git a/hack/python-sdk/update_release_version_helper.py b/hack/python-sdk/update_release_version_helper.py\n--- a/hack/python-sdk/update_release_version_helper.py\n+++ b/hack/python-sdk/update_release_version_helper.py\n@@ -24,7 +24,6 @@\n toml_files = [\n \"python/kserve/pyproject.toml\",\n \"python/aiffairness/pyproject.toml\",\n- \"python/aixexplainer/pyproject.toml\",\n \"python/alibiexplainer/pyproject.toml\",\n \"python/artexplainer/pyproject.toml\",\n \"python/custom_model/pyproject.toml\",\n", "issue": "No matches for kind \\\"HorizontalPodAutoscaler\\\" in version \\\"autoscaling/v2beta2\\\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nDeploy kserve in raw mode on kubernetes 1.26 where autoscaling/v2beta2 is no longer available\r\n\r\n\r\n**What did you expect to happen:**\r\nKserve should support v2 of the api\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright 2023 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tomlkit\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Update release version in python toml files\")\nparser.add_argument(\"version\", type=str, help=\"release version\")\nargs, _ = parser.parse_known_args()\n\ntoml_files = [\n \"python/kserve/pyproject.toml\",\n \"python/aiffairness/pyproject.toml\",\n \"python/aixexplainer/pyproject.toml\",\n \"python/alibiexplainer/pyproject.toml\",\n \"python/artexplainer/pyproject.toml\",\n \"python/custom_model/pyproject.toml\",\n \"python/custom_transformer/pyproject.toml\",\n \"python/lgbserver/pyproject.toml\",\n \"python/paddleserver/pyproject.toml\",\n \"python/pmmlserver/pyproject.toml\",\n \"python/sklearnserver/pyproject.toml\",\n \"python/xgbserver/pyproject.toml\",\n]\n\nfor toml_file in toml_files:\n with open(toml_file, \"r\") as file:\n toml_config = tomlkit.load(file)\n toml_config['tool']['poetry']['version'] = args.version\n\n with open(toml_file, \"w\") as file:\n tomlkit.dump(toml_config, file)\n", "path": "hack/python-sdk/update_release_version_helper.py"}]}
1,116
134
gh_patches_debug_14038
rasdani/github-patches
git_diff
microsoft__promptflow-3244
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Fallback to DefaultAzureCredential isn't compatible with AzureDeveloperCliCredential **Describe the bug** PromptFlow currently falls back to using DefaultAzureCredential and fetching a token for the Oauth 1.0 audience. That is not compatible with all credentials in the chain, specifically, the AzureDeveloperCliCredential which is Oauth 2.0 only. This PR changes the audience value to the correct scope value, and renames variables accordingly. **How To Reproduce the bug** Steps to reproduce the behavior, how frequent can you experience the bug: 1. Run `azd auth login` and DONT login to Azure CLI 2. Run ai-rag-chat-evaluator or other sample code that does *not* specify a credential when using the evaluators/evaluate function PR incoming! </issue> <code> [start of src/promptflow-core/promptflow/_core/token_provider.py] 1 import threading 2 from abc import ABC, abstractmethod 3 from promptflow.exceptions import UserErrorException 4 from promptflow._utils.credential_utils import get_default_azure_credential 5 6 7 # to access azure ai services, we need to get the token with this audience 8 COGNITIVE_AUDIENCE = "https://cognitiveservices.azure.com/" 9 10 11 class TokenProviderABC(ABC): 12 def __init__(self) -> None: 13 super().__init__() 14 15 @abstractmethod 16 def get_token(self) -> str: 17 pass 18 19 20 class StaticTokenProvider(TokenProviderABC): 21 def __init__(self, token: str) -> None: 22 super().__init__() 23 self.token = token 24 25 def get_token(self) -> str: 26 return self.token 27 28 29 class AzureTokenProvider(TokenProviderABC): 30 _instance_lock = threading.Lock() 31 _instance = None 32 33 def __new__(cls, *args, **kwargs): 34 with cls._instance_lock: 35 if not cls._instance: 36 cls._instance = super().__new__(cls) 37 cls._instance._init_instance() 38 return cls._instance 39 40 def _init_instance(self): 41 try: 42 # Initialize a credential instance 43 self.credential = get_default_azure_credential() 44 except ImportError as ex: 45 raise UserErrorException( 46 "Failed to initialize AzureTokenProvider. " 47 + f"Please try 'pip install azure.identity' to install dependency, {ex.msg}." 48 ) 49 50 def get_token(self): 51 audience = COGNITIVE_AUDIENCE 52 return self.credential.get_token(audience).token 53 [end of src/promptflow-core/promptflow/_core/token_provider.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/promptflow-core/promptflow/_core/token_provider.py b/src/promptflow-core/promptflow/_core/token_provider.py --- a/src/promptflow-core/promptflow/_core/token_provider.py +++ b/src/promptflow-core/promptflow/_core/token_provider.py @@ -4,8 +4,8 @@ from promptflow._utils.credential_utils import get_default_azure_credential -# to access azure ai services, we need to get the token with this audience -COGNITIVE_AUDIENCE = "https://cognitiveservices.azure.com/" +# to access azure ai services, we need to get the token with this scope +COGNITIVE_SCOPE = "https://cognitiveservices.azure.com/.default" class TokenProviderABC(ABC): @@ -48,5 +48,5 @@ ) def get_token(self): - audience = COGNITIVE_AUDIENCE - return self.credential.get_token(audience).token + scope = COGNITIVE_SCOPE + return self.credential.get_token(scope).token
{"golden_diff": "diff --git a/src/promptflow-core/promptflow/_core/token_provider.py b/src/promptflow-core/promptflow/_core/token_provider.py\n--- a/src/promptflow-core/promptflow/_core/token_provider.py\n+++ b/src/promptflow-core/promptflow/_core/token_provider.py\n@@ -4,8 +4,8 @@\n from promptflow._utils.credential_utils import get_default_azure_credential\n \n \n-# to access azure ai services, we need to get the token with this audience\n-COGNITIVE_AUDIENCE = \"https://cognitiveservices.azure.com/\"\n+# to access azure ai services, we need to get the token with this scope\n+COGNITIVE_SCOPE = \"https://cognitiveservices.azure.com/.default\"\n \n \n class TokenProviderABC(ABC):\n@@ -48,5 +48,5 @@\n )\n \n def get_token(self):\n- audience = COGNITIVE_AUDIENCE\n- return self.credential.get_token(audience).token\n+ scope = COGNITIVE_SCOPE\n+ return self.credential.get_token(scope).token\n", "issue": "[BUG] Fallback to DefaultAzureCredential isn't compatible with AzureDeveloperCliCredential\n**Describe the bug**\r\n\r\nPromptFlow currently falls back to using DefaultAzureCredential and fetching a token for the Oauth 1.0 audience. That is not compatible with all credentials in the chain, specifically, the AzureDeveloperCliCredential which is Oauth 2.0 only. This PR changes the audience value to the correct scope value, and renames variables accordingly.\r\n\r\n\r\n**How To Reproduce the bug**\r\nSteps to reproduce the behavior, how frequent can you experience the bug:\r\n1. Run `azd auth login` and DONT login to Azure CLI\r\n2. Run ai-rag-chat-evaluator or other sample code that does *not* specify a credential when using the evaluators/evaluate function\r\n\r\n\r\nPR incoming!\n", "before_files": [{"content": "import threading\nfrom abc import ABC, abstractmethod\nfrom promptflow.exceptions import UserErrorException\nfrom promptflow._utils.credential_utils import get_default_azure_credential\n\n\n# to access azure ai services, we need to get the token with this audience\nCOGNITIVE_AUDIENCE = \"https://cognitiveservices.azure.com/\"\n\n\nclass TokenProviderABC(ABC):\n def __init__(self) -> None:\n super().__init__()\n\n @abstractmethod\n def get_token(self) -> str:\n pass\n\n\nclass StaticTokenProvider(TokenProviderABC):\n def __init__(self, token: str) -> None:\n super().__init__()\n self.token = token\n\n def get_token(self) -> str:\n return self.token\n\n\nclass AzureTokenProvider(TokenProviderABC):\n _instance_lock = threading.Lock()\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n with cls._instance_lock:\n if not cls._instance:\n cls._instance = super().__new__(cls)\n cls._instance._init_instance()\n return cls._instance\n\n def _init_instance(self):\n try:\n # Initialize a credential instance\n self.credential = get_default_azure_credential()\n except ImportError as ex:\n raise UserErrorException(\n \"Failed to initialize AzureTokenProvider. \"\n + f\"Please try 'pip install azure.identity' to install dependency, {ex.msg}.\"\n )\n\n def get_token(self):\n audience = COGNITIVE_AUDIENCE\n return self.credential.get_token(audience).token\n", "path": "src/promptflow-core/promptflow/_core/token_provider.py"}]}
1,157
236
gh_patches_debug_19830
rasdani/github-patches
git_diff
yt-project__yt-4519
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DEPR: `VisibleDeprecationWarning` considered noisy Context: yt doesn't use builtin `DeprecationWarning`s but instead uses a custom class `VisibleDeprecationWarning`, whose sole intent is to work around Python's default warning filters which hide `DeprecationWarning`s by default (the idea being that they should only be made visible if users opt-in). The design currently used in yt made sense a while back, because we want deprecation warnings to reach all users, not just maintainers of dependent code with high quality standards. The trade-off being that our warnings will surface to users no matter how many layers of dependencies there are between the code being written and yt; for instance, if `yt_astro_analysis` or `trident` uses deprecated yt API, *their* users don't have much control over it and it can be argued they shouldn't see these "noisy" warnings by default. Since Python 3.7 ([PEP 565](https://peps.python.org/pep-0565/)), `DeprecationWarnings` attributed to the `__main__` module are again showed by default, meaning they will reach direct users (including in REPLs) but will not surface (by default) when deprecated API calls live in an intermediate dependency layer. It seems to me that our current solution isn't necessary anymore and that we should use simple `DeprecationWarning`s from now on. </issue> <code> [start of yt/_maintenance/deprecation.py] 1 import warnings 2 from functools import wraps 3 from types import FunctionType 4 from typing import Dict, Optional 5 6 7 class VisibleDeprecationWarning(UserWarning): 8 """Visible deprecation warning, adapted from NumPy 9 10 The nose runner does not show users DeprecationWarning. 11 This ensures that a deprecation warning is visible to users 12 if that is desired. 13 """ 14 15 # this class becomes useless after the tests are migrated from nose to pytest 16 17 pass 18 19 20 def issue_deprecation_warning( 21 msg: str, 22 *, 23 stacklevel: int, 24 since: str, 25 removal: Optional[str] = None, 26 ): 27 """ 28 Parameters 29 ---------- 30 msg : str 31 A text message explaining that the code surrounding the call to this function is 32 deprecated, and what should be changed on the user side to avoid it. 33 34 stacklevel: int 35 Number of stack frames to be skipped when pointing at caller code, starting from 36 *this* function's frame. In general 3 is a minimum. 37 38 since and removal: str version numbers, indicating the anticipated removal date 39 40 Notes 41 ----- 42 43 removal can be left empty if it is not clear how many minor releases are expected to 44 happen before the next major. 45 46 removal and since arguments are keyword-only to forbid accidentally swapping them. 47 48 Examples 49 -------- 50 >>> issue_deprecation_warning( 51 ... "This code is deprecated.", stacklevel=3, since="4.0" 52 ... ) 53 """ 54 55 msg += f"\nDeprecated since yt {since}" 56 if removal is not None: 57 msg += f"\nThis feature is planned for removal in yt {removal}" 58 warnings.warn(msg, VisibleDeprecationWarning, stacklevel=stacklevel) 59 60 61 def future_positional_only(positions2names: Dict[int, str], /, **depr_kwargs): 62 """Warn users when using a future positional-only argument as keyword. 
63 Note that positional-only arguments are available from Python 3.8 64 See https://www.python.org/dev/peps/pep-0570/ 65 """ 66 67 def outer(func: FunctionType): 68 @wraps(func) 69 def inner(*args, **kwargs): 70 for no, name in sorted(positions2names.items()): 71 if name not in kwargs: 72 continue 73 value = kwargs[name] 74 issue_deprecation_warning( 75 f"Using the {name!r} argument as keyword (on position {no}) " 76 "is deprecated. " 77 "Pass the argument as positional to suppress this warning, " 78 f"i.e., use {func.__name__}({value!r}, ...)", 79 stacklevel=3, 80 **depr_kwargs, 81 ) 82 return func(*args, **kwargs) 83 84 return inner 85 86 return outer 87 [end of yt/_maintenance/deprecation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/yt/_maintenance/deprecation.py b/yt/_maintenance/deprecation.py --- a/yt/_maintenance/deprecation.py +++ b/yt/_maintenance/deprecation.py @@ -4,19 +4,6 @@ from typing import Dict, Optional -class VisibleDeprecationWarning(UserWarning): - """Visible deprecation warning, adapted from NumPy - - The nose runner does not show users DeprecationWarning. - This ensures that a deprecation warning is visible to users - if that is desired. - """ - - # this class becomes useless after the tests are migrated from nose to pytest - - pass - - def issue_deprecation_warning( msg: str, *, @@ -55,7 +42,7 @@ msg += f"\nDeprecated since yt {since}" if removal is not None: msg += f"\nThis feature is planned for removal in yt {removal}" - warnings.warn(msg, VisibleDeprecationWarning, stacklevel=stacklevel) + warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel) def future_positional_only(positions2names: Dict[int, str], /, **depr_kwargs):
{"golden_diff": "diff --git a/yt/_maintenance/deprecation.py b/yt/_maintenance/deprecation.py\n--- a/yt/_maintenance/deprecation.py\n+++ b/yt/_maintenance/deprecation.py\n@@ -4,19 +4,6 @@\n from typing import Dict, Optional\n \n \n-class VisibleDeprecationWarning(UserWarning):\n- \"\"\"Visible deprecation warning, adapted from NumPy\n-\n- The nose runner does not show users DeprecationWarning.\n- This ensures that a deprecation warning is visible to users\n- if that is desired.\n- \"\"\"\n-\n- # this class becomes useless after the tests are migrated from nose to pytest\n-\n- pass\n-\n-\n def issue_deprecation_warning(\n msg: str,\n *,\n@@ -55,7 +42,7 @@\n msg += f\"\\nDeprecated since yt {since}\"\n if removal is not None:\n msg += f\"\\nThis feature is planned for removal in yt {removal}\"\n- warnings.warn(msg, VisibleDeprecationWarning, stacklevel=stacklevel)\n+ warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)\n \n \n def future_positional_only(positions2names: Dict[int, str], /, **depr_kwargs):\n", "issue": "DEPR: `VisibleDeprecationWarning` considered noisy\nContext:\r\nyt doesn't use builtin `DeprecationWarning`s but instead uses a custom class `VisibleDeprecationWarning`, whose sole intent is to work around Python's default warning filters which hide `DeprecationWarning`s by default (the idea being that they should only be made visible if users opt-in).\r\n\r\nThe design currently used in yt made sense a while back, because we want deprecation warnings to reach all users, not just maintainers of dependent code with high quality standards. The trade-off being that our warnings will surface to users no matter how many layers of dependencies there are between the code being written and yt; for instance, if `yt_astro_analysis` or `trident` uses deprecated yt API, *their* users don't have much control over it and it can be argued they shouldn't see these \"noisy\" warnings by default. \r\n\r\nSince Python 3.7 ([PEP 565](https://peps.python.org/pep-0565/)), `DeprecationWarnings` attributed to the `__main__` module are again showed by default, meaning they will reach direct users (including in REPLs) but will not surface (by default) when deprecated API calls live in an intermediate dependency layer.\r\n\r\nIt seems to me that our current solution isn't necessary anymore and that we should use simple `DeprecationWarning`s from now on.\n", "before_files": [{"content": "import warnings\nfrom functools import wraps\nfrom types import FunctionType\nfrom typing import Dict, Optional\n\n\nclass VisibleDeprecationWarning(UserWarning):\n \"\"\"Visible deprecation warning, adapted from NumPy\n\n The nose runner does not show users DeprecationWarning.\n This ensures that a deprecation warning is visible to users\n if that is desired.\n \"\"\"\n\n # this class becomes useless after the tests are migrated from nose to pytest\n\n pass\n\n\ndef issue_deprecation_warning(\n msg: str,\n *,\n stacklevel: int,\n since: str,\n removal: Optional[str] = None,\n):\n \"\"\"\n Parameters\n ----------\n msg : str\n A text message explaining that the code surrounding the call to this function is\n deprecated, and what should be changed on the user side to avoid it.\n\n stacklevel: int\n Number of stack frames to be skipped when pointing at caller code, starting from\n *this* function's frame. 
In general 3 is a minimum.\n\n since and removal: str version numbers, indicating the anticipated removal date\n\n Notes\n -----\n\n removal can be left empty if it is not clear how many minor releases are expected to\n happen before the next major.\n\n removal and since arguments are keyword-only to forbid accidentally swapping them.\n\n Examples\n --------\n >>> issue_deprecation_warning(\n ... \"This code is deprecated.\", stacklevel=3, since=\"4.0\"\n ... )\n \"\"\"\n\n msg += f\"\\nDeprecated since yt {since}\"\n if removal is not None:\n msg += f\"\\nThis feature is planned for removal in yt {removal}\"\n warnings.warn(msg, VisibleDeprecationWarning, stacklevel=stacklevel)\n\n\ndef future_positional_only(positions2names: Dict[int, str], /, **depr_kwargs):\n \"\"\"Warn users when using a future positional-only argument as keyword.\n Note that positional-only arguments are available from Python 3.8\n See https://www.python.org/dev/peps/pep-0570/\n \"\"\"\n\n def outer(func: FunctionType):\n @wraps(func)\n def inner(*args, **kwargs):\n for no, name in sorted(positions2names.items()):\n if name not in kwargs:\n continue\n value = kwargs[name]\n issue_deprecation_warning(\n f\"Using the {name!r} argument as keyword (on position {no}) \"\n \"is deprecated. \"\n \"Pass the argument as positional to suppress this warning, \"\n f\"i.e., use {func.__name__}({value!r}, ...)\",\n stacklevel=3,\n **depr_kwargs,\n )\n return func(*args, **kwargs)\n\n return inner\n\n return outer\n", "path": "yt/_maintenance/deprecation.py"}]}
1,607
266
gh_patches_debug_8912
rasdani/github-patches
git_diff
yt-project__yt-3847
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DOC: private methods used in cookbook recipe ### Bug report **Bug summary** in #2567, `doc/source/cookbook/render_two_fields_tf.py` was added to the cookbook On the two following lines, the private method `_get_field_info` is used https://github.com/yt-project/yt/blob/ef08989d8222e7172751d17009959102db05dd20/doc/source/cookbook/render_two_fields_tf.py#L15 https://github.com/yt-project/yt/blob/ef08989d8222e7172751d17009959102db05dd20/doc/source/cookbook/render_two_fields_tf.py#L30 This should be refactored to avoid showing private elements @zingale, do you know how we could do this ? </issue> <code> [start of doc/source/cookbook/render_two_fields_tf.py] 1 import numpy as np 2 3 import yt 4 from yt.visualization.volume_rendering.api import Scene, create_volume_source 5 6 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") 7 8 # create a scene and add volume sources to it 9 10 sc = Scene() 11 12 # Add density 13 14 field = "density" 15 ds._get_field_info(field).take_log = True 16 17 vol = create_volume_source(ds, field=field) 18 vol.use_ghost_zones = True 19 20 tf = yt.ColorTransferFunction([-28, -25]) 21 tf.clear() 22 tf.add_layers(4, 0.02, alpha=np.logspace(-3, -1, 4), colormap="winter") 23 24 vol.set_transfer_function(tf) 25 sc.add_source(vol) 26 27 # Add temperature 28 29 field = "temperature" 30 ds._get_field_info(field).take_log = True 31 32 vol2 = create_volume_source(ds, field=field) 33 vol2.use_ghost_zones = True 34 35 tf = yt.ColorTransferFunction([4.5, 7.5]) 36 tf.clear() 37 tf.add_layers(4, 0.02, alpha=np.logspace(-0.2, 0, 4), colormap="autumn") 38 39 vol2.set_transfer_function(tf) 40 sc.add_source(vol2) 41 42 # setup the camera 43 44 cam = sc.add_camera(ds, lens_type="perspective") 45 cam.resolution = (1600, 900) 46 cam.zoom(20.0) 47 48 # Render the image. 49 50 sc.render() 51 52 sc.save_annotated( 53 "render_two_fields_tf.png", 54 sigma_clip=6.0, 55 tf_rect=[0.88, 0.15, 0.03, 0.8], 56 render=False, 57 ) 58 [end of doc/source/cookbook/render_two_fields_tf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/source/cookbook/render_two_fields_tf.py b/doc/source/cookbook/render_two_fields_tf.py --- a/doc/source/cookbook/render_two_fields_tf.py +++ b/doc/source/cookbook/render_two_fields_tf.py @@ -12,7 +12,6 @@ # Add density field = "density" -ds._get_field_info(field).take_log = True vol = create_volume_source(ds, field=field) vol.use_ghost_zones = True @@ -27,7 +26,6 @@ # Add temperature field = "temperature" -ds._get_field_info(field).take_log = True vol2 = create_volume_source(ds, field=field) vol2.use_ghost_zones = True
{"golden_diff": "diff --git a/doc/source/cookbook/render_two_fields_tf.py b/doc/source/cookbook/render_two_fields_tf.py\n--- a/doc/source/cookbook/render_two_fields_tf.py\n+++ b/doc/source/cookbook/render_two_fields_tf.py\n@@ -12,7 +12,6 @@\n # Add density\n \n field = \"density\"\n-ds._get_field_info(field).take_log = True\n \n vol = create_volume_source(ds, field=field)\n vol.use_ghost_zones = True\n@@ -27,7 +26,6 @@\n # Add temperature\n \n field = \"temperature\"\n-ds._get_field_info(field).take_log = True\n \n vol2 = create_volume_source(ds, field=field)\n vol2.use_ghost_zones = True\n", "issue": "DOC: private methods used in cookbook recipe\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nin #2567, `doc/source/cookbook/render_two_fields_tf.py` was added to the cookbook\r\nOn the two following lines, the private method `_get_field_info` is used\r\nhttps://github.com/yt-project/yt/blob/ef08989d8222e7172751d17009959102db05dd20/doc/source/cookbook/render_two_fields_tf.py#L15\r\nhttps://github.com/yt-project/yt/blob/ef08989d8222e7172751d17009959102db05dd20/doc/source/cookbook/render_two_fields_tf.py#L30\r\n\r\nThis should be refactored to avoid showing private elements\r\n@zingale, do you know how we could do this ?\n", "before_files": [{"content": "import numpy as np\n\nimport yt\nfrom yt.visualization.volume_rendering.api import Scene, create_volume_source\n\nds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n\n# create a scene and add volume sources to it\n\nsc = Scene()\n\n# Add density\n\nfield = \"density\"\nds._get_field_info(field).take_log = True\n\nvol = create_volume_source(ds, field=field)\nvol.use_ghost_zones = True\n\ntf = yt.ColorTransferFunction([-28, -25])\ntf.clear()\ntf.add_layers(4, 0.02, alpha=np.logspace(-3, -1, 4), colormap=\"winter\")\n\nvol.set_transfer_function(tf)\nsc.add_source(vol)\n\n# Add temperature\n\nfield = \"temperature\"\nds._get_field_info(field).take_log = True\n\nvol2 = create_volume_source(ds, field=field)\nvol2.use_ghost_zones = True\n\ntf = yt.ColorTransferFunction([4.5, 7.5])\ntf.clear()\ntf.add_layers(4, 0.02, alpha=np.logspace(-0.2, 0, 4), colormap=\"autumn\")\n\nvol2.set_transfer_function(tf)\nsc.add_source(vol2)\n\n# setup the camera\n\ncam = sc.add_camera(ds, lens_type=\"perspective\")\ncam.resolution = (1600, 900)\ncam.zoom(20.0)\n\n# Render the image.\n\nsc.render()\n\nsc.save_annotated(\n \"render_two_fields_tf.png\",\n sigma_clip=6.0,\n tf_rect=[0.88, 0.15, 0.03, 0.8],\n render=False,\n)\n", "path": "doc/source/cookbook/render_two_fields_tf.py"}]}
1,246
159
gh_patches_debug_37024
rasdani/github-patches
git_diff
comic__grand-challenge.org-1936
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Challenge creation notifications At the moment, staff users receive emails about new challenges. It would be nice to turns those emails into notifications. **Challenge creation emails** - create follow: `staff users` --> `gc.localhost Site` - upon challenge creation: send `action` with challenge `creator` as `actor`, the created `challenge` as `action_object` and the `gc.localhost site` as `target` - _Notification template:_ print action as is for normal challenges, for external challenges, make sure to link the `update_url` for the challenge in the notification rather than the challenge url </issue> <code> [start of app/grandchallenge/challenges/emails.py] 1 from django.conf import settings 2 from django.contrib.auth import get_user_model 3 from django.contrib.sites.models import Site 4 from django.core.mail import send_mail 5 6 from grandchallenge.subdomains.utils import reverse 7 8 9 def send_challenge_created_email(challenge): 10 site = Site.objects.get_current() 11 message = ( 12 f"Dear {{}},\n\n" 13 f"User {challenge.creator} has just created the challenge " 14 f"{challenge.short_name} at {challenge.get_absolute_url()}.\n\n" 15 f"Regards,\n" 16 f"{site.name}\n\n" 17 f"This is an automated service email from {site.domain}." 18 ) 19 20 staff = get_user_model().objects.filter(is_staff=True) 21 22 for s in staff: 23 send_mail( 24 subject=f"[{site.domain.lower()}] New Challenge Created", 25 message=message.format(s.username), 26 from_email=settings.DEFAULT_FROM_EMAIL, 27 recipient_list=[s.email], 28 ) 29 30 31 def send_external_challenge_created_email(challenge): 32 site = Site.objects.get_current() 33 update_url = reverse( 34 "challenges:external-update", 35 kwargs={"short_name": challenge.short_name}, 36 ) 37 38 message = ( 39 f"Dear {{}},\n\n" 40 f"User {challenge.creator} has just created the challenge " 41 f"{challenge.short_name}. You need to un-hide it before it is visible " 42 f"on the all challenges page, you can do that here: {update_url}\n\n" 43 f"Regards,\n" 44 f"{site.name}\n\n" 45 f"This is an automated service email from {site.domain}." 46 ) 47 48 staff = get_user_model().objects.filter(is_staff=True) 49 50 for s in staff: 51 send_mail( 52 subject=f"[{site.domain.lower()}] New External Challenge", 53 message=message.format(s.username), 54 from_email=settings.DEFAULT_FROM_EMAIL, 55 recipient_list=[s.email], 56 ) 57 [end of app/grandchallenge/challenges/emails.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/challenges/emails.py b/app/grandchallenge/challenges/emails.py --- a/app/grandchallenge/challenges/emails.py +++ b/app/grandchallenge/challenges/emails.py @@ -1,7 +1,5 @@ -from django.conf import settings -from django.contrib.auth import get_user_model from django.contrib.sites.models import Site -from django.core.mail import send_mail +from django.core.mail import mail_managers from grandchallenge.subdomains.utils import reverse @@ -9,7 +7,7 @@ def send_challenge_created_email(challenge): site = Site.objects.get_current() message = ( - f"Dear {{}},\n\n" + f"Dear manager,\n\n" f"User {challenge.creator} has just created the challenge " f"{challenge.short_name} at {challenge.get_absolute_url()}.\n\n" f"Regards,\n" @@ -17,15 +15,10 @@ f"This is an automated service email from {site.domain}." ) - staff = get_user_model().objects.filter(is_staff=True) - - for s in staff: - send_mail( - subject=f"[{site.domain.lower()}] New Challenge Created", - message=message.format(s.username), - from_email=settings.DEFAULT_FROM_EMAIL, - recipient_list=[s.email], - ) + mail_managers( + subject=f"[{site.domain.lower()}] New Challenge Created", + message=message, + ) def send_external_challenge_created_email(challenge): @@ -36,7 +29,7 @@ ) message = ( - f"Dear {{}},\n\n" + f"Dear manager,\n\n" f"User {challenge.creator} has just created the challenge " f"{challenge.short_name}. You need to un-hide it before it is visible " f"on the all challenges page, you can do that here: {update_url}\n\n" @@ -45,12 +38,7 @@ f"This is an automated service email from {site.domain}." ) - staff = get_user_model().objects.filter(is_staff=True) - - for s in staff: - send_mail( - subject=f"[{site.domain.lower()}] New External Challenge", - message=message.format(s.username), - from_email=settings.DEFAULT_FROM_EMAIL, - recipient_list=[s.email], - ) + mail_managers( + subject=f"[{site.domain.lower()}] New External Challenge", + message=message, + )
{"golden_diff": "diff --git a/app/grandchallenge/challenges/emails.py b/app/grandchallenge/challenges/emails.py\n--- a/app/grandchallenge/challenges/emails.py\n+++ b/app/grandchallenge/challenges/emails.py\n@@ -1,7 +1,5 @@\n-from django.conf import settings\n-from django.contrib.auth import get_user_model\n from django.contrib.sites.models import Site\n-from django.core.mail import send_mail\n+from django.core.mail import mail_managers\n \n from grandchallenge.subdomains.utils import reverse\n \n@@ -9,7 +7,7 @@\n def send_challenge_created_email(challenge):\n site = Site.objects.get_current()\n message = (\n- f\"Dear {{}},\\n\\n\"\n+ f\"Dear manager,\\n\\n\"\n f\"User {challenge.creator} has just created the challenge \"\n f\"{challenge.short_name} at {challenge.get_absolute_url()}.\\n\\n\"\n f\"Regards,\\n\"\n@@ -17,15 +15,10 @@\n f\"This is an automated service email from {site.domain}.\"\n )\n \n- staff = get_user_model().objects.filter(is_staff=True)\n-\n- for s in staff:\n- send_mail(\n- subject=f\"[{site.domain.lower()}] New Challenge Created\",\n- message=message.format(s.username),\n- from_email=settings.DEFAULT_FROM_EMAIL,\n- recipient_list=[s.email],\n- )\n+ mail_managers(\n+ subject=f\"[{site.domain.lower()}] New Challenge Created\",\n+ message=message,\n+ )\n \n \n def send_external_challenge_created_email(challenge):\n@@ -36,7 +29,7 @@\n )\n \n message = (\n- f\"Dear {{}},\\n\\n\"\n+ f\"Dear manager,\\n\\n\"\n f\"User {challenge.creator} has just created the challenge \"\n f\"{challenge.short_name}. You need to un-hide it before it is visible \"\n f\"on the all challenges page, you can do that here: {update_url}\\n\\n\"\n@@ -45,12 +38,7 @@\n f\"This is an automated service email from {site.domain}.\"\n )\n \n- staff = get_user_model().objects.filter(is_staff=True)\n-\n- for s in staff:\n- send_mail(\n- subject=f\"[{site.domain.lower()}] New External Challenge\",\n- message=message.format(s.username),\n- from_email=settings.DEFAULT_FROM_EMAIL,\n- recipient_list=[s.email],\n- )\n+ mail_managers(\n+ subject=f\"[{site.domain.lower()}] New External Challenge\",\n+ message=message,\n+ )\n", "issue": "Challenge creation notifications\nAt the moment, staff users receive emails about new challenges. It would be nice to turns those emails into notifications. 
\r\n\r\n**Challenge creation emails**\r\n\r\n- create follow: `staff users` --> `gc.localhost Site`\r\n- upon challenge creation: send `action` with challenge `creator` as `actor`, the created `challenge` as `action_object` and the `gc.localhost site` as `target`\r\n- _Notification template:_ print action as is for normal challenges, for external challenges, make sure to link the `update_url` for the challenge in the notification rather than the challenge url\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.sites.models import Site\nfrom django.core.mail import send_mail\n\nfrom grandchallenge.subdomains.utils import reverse\n\n\ndef send_challenge_created_email(challenge):\n site = Site.objects.get_current()\n message = (\n f\"Dear {{}},\\n\\n\"\n f\"User {challenge.creator} has just created the challenge \"\n f\"{challenge.short_name} at {challenge.get_absolute_url()}.\\n\\n\"\n f\"Regards,\\n\"\n f\"{site.name}\\n\\n\"\n f\"This is an automated service email from {site.domain}.\"\n )\n\n staff = get_user_model().objects.filter(is_staff=True)\n\n for s in staff:\n send_mail(\n subject=f\"[{site.domain.lower()}] New Challenge Created\",\n message=message.format(s.username),\n from_email=settings.DEFAULT_FROM_EMAIL,\n recipient_list=[s.email],\n )\n\n\ndef send_external_challenge_created_email(challenge):\n site = Site.objects.get_current()\n update_url = reverse(\n \"challenges:external-update\",\n kwargs={\"short_name\": challenge.short_name},\n )\n\n message = (\n f\"Dear {{}},\\n\\n\"\n f\"User {challenge.creator} has just created the challenge \"\n f\"{challenge.short_name}. You need to un-hide it before it is visible \"\n f\"on the all challenges page, you can do that here: {update_url}\\n\\n\"\n f\"Regards,\\n\"\n f\"{site.name}\\n\\n\"\n f\"This is an automated service email from {site.domain}.\"\n )\n\n staff = get_user_model().objects.filter(is_staff=True)\n\n for s in staff:\n send_mail(\n subject=f\"[{site.domain.lower()}] New External Challenge\",\n message=message.format(s.username),\n from_email=settings.DEFAULT_FROM_EMAIL,\n recipient_list=[s.email],\n )\n", "path": "app/grandchallenge/challenges/emails.py"}]}
1,186
563
gh_patches_debug_438
rasdani/github-patches
git_diff
OpenMined__PySyft-155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set up CI for automated testing and style checks Now that our codebase is growing (hooray!), we should set up CI for automated testing and style checks (PEP8, PEP257). Choices include [CircleCI](https://circleci.com) and [TravisCI](https://travis-ci.org). These can be integrated into our repo such that every pull request will be checked before review. </issue> <code> [start of setup.py] 1 import os 2 from setuptools import setup,find_packages 3 4 # Utility function to read the README file. 5 # Used for the long_description. It's nice, because now 1) we have a top level 6 # README file and 2) it's easier to type in the README file than to put a raw 7 # string in below ... 8 def read(fname): 9 return open(os.path.join(os.path.dirname(__file__), fname)).read() 10 11 requirements = read('requirements.txt').split() 12 13 setup( 14 name = "syft", 15 version = "0.1.0", 16 author = "Amber Trask", 17 author_email = "[email protected]", 18 description = ("A library for Homomorphically Encrypted Deep Learning Algorithms"), 19 license = "Apache-2.0", 20 keywords = "deep learning machine artificial intelligence homomorphic encryption", 21 packages=find_packages(exclude=['notebooks', 'test*','dist']), 22 include_package_data=True, 23 long_description=read('README.md'), 24 url='github.com/OpenMined/Syft', 25 classifiers=[ 26 "Development Status :: 1 - Alpha", 27 ], 28 scripts=['bin/syft_cmd'], 29 install_requires=requirements, 30 setup_requires=['pytest-runner'], 31 tests_require=['pytest'] 32 ) 33 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -28,5 +28,5 @@ scripts=['bin/syft_cmd'], install_requires=requirements, setup_requires=['pytest-runner'], - tests_require=['pytest'] + tests_require=['pytest', 'pytest-flake8'] )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,5 +28,5 @@\n scripts=['bin/syft_cmd'],\n install_requires=requirements,\n setup_requires=['pytest-runner'],\n- tests_require=['pytest']\n+ tests_require=['pytest', 'pytest-flake8']\n )\n", "issue": "Set up CI for automated testing and style checks\nNow that our codebase is growing (hooray!), we should set up CI for automated testing and style checks (PEP8, PEP257). \r\n\r\nChoices include [CircleCI](https://circleci.com) and [TravisCI](https://travis-ci.org). These can be integrated into our repo such that every pull request will be checked before review. \n", "before_files": [{"content": "import os\nfrom setuptools import setup,find_packages\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nrequirements = read('requirements.txt').split()\n\nsetup(\n name = \"syft\",\n version = \"0.1.0\",\n author = \"Amber Trask\",\n author_email = \"[email protected]\",\n description = (\"A library for Homomorphically Encrypted Deep Learning Algorithms\"),\n license = \"Apache-2.0\",\n keywords = \"deep learning machine artificial intelligence homomorphic encryption\",\n packages=find_packages(exclude=['notebooks', 'test*','dist']),\n include_package_data=True,\n long_description=read('README.md'),\n url='github.com/OpenMined/Syft',\n classifiers=[\n \"Development Status :: 1 - Alpha\",\n ],\n scripts=['bin/syft_cmd'],\n install_requires=requirements,\n setup_requires=['pytest-runner'],\n tests_require=['pytest']\n)\n", "path": "setup.py"}]}
944
76
gh_patches_debug_6021
rasdani/github-patches
git_diff
scrapy__scrapy-6334
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 'json' selector type not documented https://docs.scrapy.org/en/latest/topics/selectors.html `type defines the selector type, it can be "html", "xml" or None (default).` But my tests are now failing because it's returning `json` type for actual json docs now. </issue> <code> [start of scrapy/selector/unified.py] 1 """ 2 XPath selectors based on lxml 3 """ 4 5 from typing import Any, Optional, Type, Union 6 7 from parsel import Selector as _ParselSelector 8 9 from scrapy.http import HtmlResponse, TextResponse, XmlResponse 10 from scrapy.utils.python import to_bytes 11 from scrapy.utils.response import get_base_url 12 from scrapy.utils.trackref import object_ref 13 14 __all__ = ["Selector", "SelectorList"] 15 16 _NOT_SET = object() 17 18 19 def _st(response: Optional[TextResponse], st: Optional[str]) -> str: 20 if st is None: 21 return "xml" if isinstance(response, XmlResponse) else "html" 22 return st 23 24 25 def _response_from_text(text: Union[str, bytes], st: Optional[str]) -> TextResponse: 26 rt: Type[TextResponse] = XmlResponse if st == "xml" else HtmlResponse 27 return rt(url="about:blank", encoding="utf-8", body=to_bytes(text, "utf-8")) 28 29 30 class SelectorList(_ParselSelector.selectorlist_cls, object_ref): 31 """ 32 The :class:`SelectorList` class is a subclass of the builtin ``list`` 33 class, which provides a few additional methods. 34 """ 35 36 37 class Selector(_ParselSelector, object_ref): 38 """ 39 An instance of :class:`Selector` is a wrapper over response to select 40 certain parts of its content. 41 42 ``response`` is an :class:`~scrapy.http.HtmlResponse` or an 43 :class:`~scrapy.http.XmlResponse` object that will be used for selecting 44 and extracting data. 45 46 ``text`` is a unicode string or utf-8 encoded text for cases when a 47 ``response`` isn't available. Using ``text`` and ``response`` together is 48 undefined behavior. 49 50 ``type`` defines the selector type, it can be ``"html"``, ``"xml"`` 51 or ``None`` (default). 52 53 If ``type`` is ``None``, the selector automatically chooses the best type 54 based on ``response`` type (see below), or defaults to ``"html"`` in case it 55 is used together with ``text``. 56 57 If ``type`` is ``None`` and a ``response`` is passed, the selector type is 58 inferred from the response type as follows: 59 60 * ``"html"`` for :class:`~scrapy.http.HtmlResponse` type 61 * ``"xml"`` for :class:`~scrapy.http.XmlResponse` type 62 * ``"html"`` for anything else 63 64 Otherwise, if ``type`` is set, the selector type will be forced and no 65 detection will occur. 
66 """ 67 68 __slots__ = ["response"] 69 selectorlist_cls = SelectorList 70 71 def __init__( 72 self, 73 response: Optional[TextResponse] = None, 74 text: Optional[str] = None, 75 type: Optional[str] = None, 76 root: Optional[Any] = _NOT_SET, 77 **kwargs: Any, 78 ): 79 if response is not None and text is not None: 80 raise ValueError( 81 f"{self.__class__.__name__}.__init__() received " 82 "both response and text" 83 ) 84 85 st = _st(response, type) 86 87 if text is not None: 88 response = _response_from_text(text, st) 89 90 if response is not None: 91 text = response.text 92 kwargs.setdefault("base_url", get_base_url(response)) 93 94 self.response = response 95 96 if root is not _NOT_SET: 97 kwargs["root"] = root 98 99 super().__init__(text=text, type=st, **kwargs) 100 [end of scrapy/selector/unified.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/selector/unified.py b/scrapy/selector/unified.py --- a/scrapy/selector/unified.py +++ b/scrapy/selector/unified.py @@ -47,7 +47,7 @@ ``response`` isn't available. Using ``text`` and ``response`` together is undefined behavior. - ``type`` defines the selector type, it can be ``"html"``, ``"xml"`` + ``type`` defines the selector type, it can be ``"html"``, ``"xml"``, ``"json"`` or ``None`` (default). If ``type`` is ``None``, the selector automatically chooses the best type
{"golden_diff": "diff --git a/scrapy/selector/unified.py b/scrapy/selector/unified.py\n--- a/scrapy/selector/unified.py\n+++ b/scrapy/selector/unified.py\n@@ -47,7 +47,7 @@\n ``response`` isn't available. Using ``text`` and ``response`` together is\n undefined behavior.\n \n- ``type`` defines the selector type, it can be ``\"html\"``, ``\"xml\"``\n+ ``type`` defines the selector type, it can be ``\"html\"``, ``\"xml\"``, ``\"json\"``\n or ``None`` (default).\n \n If ``type`` is ``None``, the selector automatically chooses the best type\n", "issue": "'json' selector type not documented\nhttps://docs.scrapy.org/en/latest/topics/selectors.html\r\n\r\n`type defines the selector type, it can be \"html\", \"xml\" or None (default).`\r\n\r\nBut my tests are now failing because it's returning `json` type for actual json docs now.\n", "before_files": [{"content": "\"\"\"\nXPath selectors based on lxml\n\"\"\"\n\nfrom typing import Any, Optional, Type, Union\n\nfrom parsel import Selector as _ParselSelector\n\nfrom scrapy.http import HtmlResponse, TextResponse, XmlResponse\nfrom scrapy.utils.python import to_bytes\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.utils.trackref import object_ref\n\n__all__ = [\"Selector\", \"SelectorList\"]\n\n_NOT_SET = object()\n\n\ndef _st(response: Optional[TextResponse], st: Optional[str]) -> str:\n if st is None:\n return \"xml\" if isinstance(response, XmlResponse) else \"html\"\n return st\n\n\ndef _response_from_text(text: Union[str, bytes], st: Optional[str]) -> TextResponse:\n rt: Type[TextResponse] = XmlResponse if st == \"xml\" else HtmlResponse\n return rt(url=\"about:blank\", encoding=\"utf-8\", body=to_bytes(text, \"utf-8\"))\n\n\nclass SelectorList(_ParselSelector.selectorlist_cls, object_ref):\n \"\"\"\n The :class:`SelectorList` class is a subclass of the builtin ``list``\n class, which provides a few additional methods.\n \"\"\"\n\n\nclass Selector(_ParselSelector, object_ref):\n \"\"\"\n An instance of :class:`Selector` is a wrapper over response to select\n certain parts of its content.\n\n ``response`` is an :class:`~scrapy.http.HtmlResponse` or an\n :class:`~scrapy.http.XmlResponse` object that will be used for selecting\n and extracting data.\n\n ``text`` is a unicode string or utf-8 encoded text for cases when a\n ``response`` isn't available. 
Using ``text`` and ``response`` together is\n undefined behavior.\n\n ``type`` defines the selector type, it can be ``\"html\"``, ``\"xml\"``\n or ``None`` (default).\n\n If ``type`` is ``None``, the selector automatically chooses the best type\n based on ``response`` type (see below), or defaults to ``\"html\"`` in case it\n is used together with ``text``.\n\n If ``type`` is ``None`` and a ``response`` is passed, the selector type is\n inferred from the response type as follows:\n\n * ``\"html\"`` for :class:`~scrapy.http.HtmlResponse` type\n * ``\"xml\"`` for :class:`~scrapy.http.XmlResponse` type\n * ``\"html\"`` for anything else\n\n Otherwise, if ``type`` is set, the selector type will be forced and no\n detection will occur.\n \"\"\"\n\n __slots__ = [\"response\"]\n selectorlist_cls = SelectorList\n\n def __init__(\n self,\n response: Optional[TextResponse] = None,\n text: Optional[str] = None,\n type: Optional[str] = None,\n root: Optional[Any] = _NOT_SET,\n **kwargs: Any,\n ):\n if response is not None and text is not None:\n raise ValueError(\n f\"{self.__class__.__name__}.__init__() received \"\n \"both response and text\"\n )\n\n st = _st(response, type)\n\n if text is not None:\n response = _response_from_text(text, st)\n\n if response is not None:\n text = response.text\n kwargs.setdefault(\"base_url\", get_base_url(response))\n\n self.response = response\n\n if root is not _NOT_SET:\n kwargs[\"root\"] = root\n\n super().__init__(text=text, type=st, **kwargs)\n", "path": "scrapy/selector/unified.py"}]}
1,575
154
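The Scrapy row above only documents that `type` may be `"json"`; a minimal usage sketch of that selector type, assuming a Scrapy release whose bundled parsel supports JSON selectors (the literal JSON string and the JMESPath call are illustrative, not taken from the row):

```
# Illustrative sketch of the "json" selector type mentioned in the patch above.
# Assumes a Scrapy install whose parsel dependency supports JSON selectors.
from scrapy.selector import Selector

sel = Selector(text='{"title": "Example", "tags": ["a", "b"]}', type="json")

print(sel.type)                      # expected: "json"
print(sel.jmespath("title").get())   # expected: "Example" (JMESPath needs a recent parsel)
```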
gh_patches_debug_9764
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-895
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> fixing abki_de recycling fix for #891 </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py] 1 import requests 2 from waste_collection_schedule import Collection # type: ignore[attr-defined] 3 from waste_collection_schedule.service.ICS import ICS 4 5 from datetime import datetime 6 import logging 7 8 TITLE = "Abfallwirtschaftsbetrieb Kiel (ABK)" 9 DESCRIPTION = "Source for Abfallwirtschaftsbetrieb Kiel (ABK)." 10 URL = "https://abki.de/" 11 TEST_CASES = { 12 "auguste-viktoria-straße, 14": {"street": "auguste-viktoria-straße", "number": 14}, 13 "Achterwehrer Straße, 1 A": {"street": "Achterwehrer Straße", "number": "1 a"}, 14 "Boltenhagener Straße, 4-8": {"street": "Boltenhagener Straße", "number": "4-8"}, 15 } 16 17 18 ICON_MAP = { 19 "Restabfall": "mdi:trash-can", 20 "Glass": "mdi:bottle-soda", 21 "Bioabfall": "mdi:leaf", 22 "Papier": "mdi:package-variant", 23 "Gelbe": "mdi:recycle", 24 } 25 26 27 ICAL_URL = "https://abki.de/abki-services/abki-leerungen-ical" 28 _LOGGER = logging.getLogger(__name__) 29 30 31 class Source: 32 def __init__(self, street: str, number: str | int): 33 self._street: str = street 34 self._number: str = str(number) 35 self._ics = ICS() 36 37 def fetch(self): 38 now = datetime.now() 39 session = requests.Session() 40 41 # get street id 42 params = f'filter[logic]=and&filter[filters][0][value]={self._street}&filter[filters][0][field]=Strasse&filter[filters][0][operator]=startswith&filter[filters][0][ignoreCase]=true' 43 r = session.get( 44 "https://abki.de/abki-services/strassennamen", params=params) # , params=params) 45 r.raise_for_status() 46 47 streets = r.json() 48 if len(streets) > 1: 49 _LOGGER.warning( 50 "Multiple streets found please be more specific, using first one: "+streets[0]["Strasse"]) 51 if len(streets) < 1: 52 raise ValueError("No street found", self._street) 53 54 street_id = streets[0]["IDSTREET"] 55 56 # get number id 57 r = session.get("https://abki.de/abki-services/streetnumber", 58 params={"IDSTREET": street_id}) 59 r.raise_for_status() 60 numbers = r.json() 61 number_id, standort_id = None, None 62 for number in numbers: 63 if number["NUMBER"].lower().replace(" ", "").replace("-", "") == self._number.lower().replace(" ", "").replace("-", ""): 64 number_id = number["id"] 65 standort_id = number["IDSTANDORT"] 66 break 67 68 if number_id is None: 69 raise ValueError("No number found", self._number) 70 71 # get ics file link 72 r = session.get("https://abki.de/abki-services/leerungen-data", params={ 73 "Zeitraum": now.year, 74 "Strasse_input": self._street, 75 "Strasse": street_id, 76 "IDSTANDORT_input": 2, 77 "IDSTANDORT": standort_id, 78 "Hausnummernwahl": number_id 79 }) 80 r.raise_for_status() 81 request_data = r.json()["dataFile"] 82 83 # get ICS file 84 r = session.get(ICAL_URL, params={"data": request_data}) 85 86 dates = self._ics.convert(r.text) 87 88 # if december, also try to get next year 89 if now.month == 12: 90 try: 91 r = session.get("https://abki.de/abki-services/leerungen-data", params={ 92 "Zeitraum": now.year+1, 93 "Strasse_input": "Aarhusstraße", 94 "Strasse": street_id, 95 "IDSTANDORT_input": 2, 96 "IDSTANDORT": standort_id, 97 "Hausnummernwahl": number_id 98 }) 99 r.raise_for_status() 100 request_data = r.json()["dataFile"] 101 r = session.get(ICAL_URL, params={"data": request_data}) 102 dates += self._ics.convert(r.text) 103 except: 104 pass 105 106 
entries = [] 107 for d in dates: 108 entries.append(Collection( 109 d[0], d[1], ICON_MAP.get(d[1].split(" ")[0]))) 110 111 return entries 112 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py @@ -90,7 +90,7 @@ try: r = session.get("https://abki.de/abki-services/leerungen-data", params={ "Zeitraum": now.year+1, - "Strasse_input": "Aarhusstraße", + "Strasse_input": self._street, "Strasse": street_id, "IDSTANDORT_input": 2, "IDSTANDORT": standort_id,
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py\n@@ -90,7 +90,7 @@\n try:\n r = session.get(\"https://abki.de/abki-services/leerungen-data\", params={\n \"Zeitraum\": now.year+1,\n- \"Strasse_input\": \"Aarhusstra\u00dfe\",\n+ \"Strasse_input\": self._street,\n \"Strasse\": street_id,\n \"IDSTANDORT_input\": 2,\n \"IDSTANDORT\": standort_id,\n", "issue": "fixing abki_de recycling\nfix for #891\n", "before_files": [{"content": "import requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nfrom datetime import datetime\nimport logging\n\nTITLE = \"Abfallwirtschaftsbetrieb Kiel (ABK)\"\nDESCRIPTION = \"Source for Abfallwirtschaftsbetrieb Kiel (ABK).\"\nURL = \"https://abki.de/\"\nTEST_CASES = {\n \"auguste-viktoria-stra\u00dfe, 14\": {\"street\": \"auguste-viktoria-stra\u00dfe\", \"number\": 14},\n \"Achterwehrer Stra\u00dfe, 1 A\": {\"street\": \"Achterwehrer Stra\u00dfe\", \"number\": \"1 a\"},\n \"Boltenhagener Stra\u00dfe, 4-8\": {\"street\": \"Boltenhagener Stra\u00dfe\", \"number\": \"4-8\"},\n}\n\n\nICON_MAP = {\n \"Restabfall\": \"mdi:trash-can\",\n \"Glass\": \"mdi:bottle-soda\",\n \"Bioabfall\": \"mdi:leaf\",\n \"Papier\": \"mdi:package-variant\",\n \"Gelbe\": \"mdi:recycle\",\n}\n\n\nICAL_URL = \"https://abki.de/abki-services/abki-leerungen-ical\"\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, street: str, number: str | int):\n self._street: str = street\n self._number: str = str(number)\n self._ics = ICS()\n\n def fetch(self):\n now = datetime.now()\n session = requests.Session()\n\n # get street id\n params = f'filter[logic]=and&filter[filters][0][value]={self._street}&filter[filters][0][field]=Strasse&filter[filters][0][operator]=startswith&filter[filters][0][ignoreCase]=true'\n r = session.get(\n \"https://abki.de/abki-services/strassennamen\", params=params) # , params=params)\n r.raise_for_status()\n\n streets = r.json()\n if len(streets) > 1:\n _LOGGER.warning(\n \"Multiple streets found please be more specific, using first one: \"+streets[0][\"Strasse\"])\n if len(streets) < 1:\n raise ValueError(\"No street found\", self._street)\n\n street_id = streets[0][\"IDSTREET\"]\n\n # get number id\n r = session.get(\"https://abki.de/abki-services/streetnumber\",\n params={\"IDSTREET\": street_id})\n r.raise_for_status()\n numbers = r.json()\n number_id, standort_id = None, None\n for number in numbers:\n if number[\"NUMBER\"].lower().replace(\" \", \"\").replace(\"-\", \"\") == self._number.lower().replace(\" \", \"\").replace(\"-\", \"\"):\n number_id = number[\"id\"]\n standort_id = number[\"IDSTANDORT\"]\n break\n \n if number_id is None:\n raise ValueError(\"No number found\", self._number)\n\n # get ics file link\n r = session.get(\"https://abki.de/abki-services/leerungen-data\", params={\n \"Zeitraum\": now.year,\n \"Strasse_input\": self._street,\n \"Strasse\": street_id,\n \"IDSTANDORT_input\": 2,\n \"IDSTANDORT\": standort_id,\n \"Hausnummernwahl\": number_id\n })\n r.raise_for_status()\n request_data = r.json()[\"dataFile\"]\n\n # get ICS file\n r = session.get(ICAL_URL, params={\"data\": request_data})\n\n 
dates = self._ics.convert(r.text)\n\n # if december, also try to get next year\n if now.month == 12:\n try:\n r = session.get(\"https://abki.de/abki-services/leerungen-data\", params={\n \"Zeitraum\": now.year+1,\n \"Strasse_input\": \"Aarhusstra\u00dfe\",\n \"Strasse\": street_id,\n \"IDSTANDORT_input\": 2,\n \"IDSTANDORT\": standort_id,\n \"Hausnummernwahl\": number_id\n })\n r.raise_for_status()\n request_data = r.json()[\"dataFile\"]\n r = session.get(ICAL_URL, params={\"data\": request_data})\n dates += self._ics.convert(r.text)\n except:\n pass\n\n entries = []\n for d in dates:\n entries.append(Collection(\n d[0], d[1], ICON_MAP.get(d[1].split(\" \")[0])))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/abki_de.py"}]}
1,827
179
gh_patches_debug_53281
rasdani/github-patches
git_diff
holoviz__panel-645
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Port not being released after stopping threaded holoviz panel app server Closing a threaded panel app holds on to the port it started on. This is different behavior than closing an app initialized without threading. ``` usgs_logo = pn.panel('../assets/usgs_logo.png', height=130) column = pn.Column(usgs_logo) app = column.show(port=8889) app.stop() ``` Port 8889 is released. ``` app = row.show(port=8889, threaded=True) app.stop() ``` Port 8889 is not released. </issue> <code> [start of panel/io/server.py] 1 """ 2 Utilities for creating bokeh Server instances. 3 """ 4 from __future__ import absolute_import, division, unicode_literals 5 6 import signal 7 import threading 8 9 from functools import partial 10 11 from bokeh.server.server import Server 12 13 from .state import state 14 15 16 #--------------------------------------------------------------------- 17 # Private API 18 #--------------------------------------------------------------------- 19 20 def _origin_url(url): 21 if url.startswith("http"): 22 url = url.split("//")[1] 23 return url 24 25 26 def _server_url(url, port): 27 if url.startswith("http"): 28 return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/") 29 else: 30 return 'http://%s:%d%s' % (url.split(':')[0], port, "/") 31 32 #--------------------------------------------------------------------- 33 # Public API 34 #--------------------------------------------------------------------- 35 36 def get_server(panel, port=0, websocket_origin=None, loop=None, 37 show=False, start=False, **kwargs): 38 """ 39 Returns a Server instance with this panel attached as the root 40 app. 41 42 Arguments 43 --------- 44 port: int (optional, default=0) 45 Allows specifying a specific port 46 websocket_origin: str or list(str) (optional) 47 A list of hosts that can connect to the websocket. 48 49 This is typically required when embedding a server app in 50 an external web site. 51 52 If None, "localhost" is used. 
53 loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current()) 54 The tornado IOLoop to run the Server on 55 show : boolean (optional, default=False) 56 Whether to open the server in a new browser tab on start 57 start : boolean(optional, default=False) 58 Whether to start the Server 59 kwargs: dict 60 Additional keyword arguments to pass to Server instance 61 62 Returns 63 ------- 64 server : bokeh.server.server.Server 65 Bokeh Server instance running this panel 66 """ 67 from tornado.ioloop import IOLoop 68 opts = dict(kwargs) 69 if loop: 70 loop.make_current() 71 opts['io_loop'] = loop 72 else: 73 opts['io_loop'] = IOLoop.current() 74 75 if websocket_origin: 76 if not isinstance(websocket_origin, list): 77 websocket_origin = [websocket_origin] 78 opts['allow_websocket_origin'] = websocket_origin 79 80 server_id = kwargs.pop('server_id', None) 81 server = Server({'/': partial(panel._modify_doc, server_id)}, port=port, **opts) 82 if server_id: 83 state._servers[server_id] = (server, panel, []) 84 85 if show: 86 def show_callback(): 87 server.show('/') 88 server.io_loop.add_callback(show_callback) 89 90 def sig_exit(*args, **kwargs): 91 server.io_loop.add_callback_from_signal(do_stop) 92 93 def do_stop(*args, **kwargs): 94 server.io_loop.stop() 95 96 try: 97 signal.signal(signal.SIGINT, sig_exit) 98 except ValueError: 99 pass # Can't use signal on a thread 100 101 if start: 102 server.start() 103 try: 104 server.io_loop.start() 105 except RuntimeError: 106 pass 107 return server 108 109 110 class StoppableThread(threading.Thread): 111 """Thread class with a stop() method.""" 112 113 def __init__(self, io_loop=None, timeout=1000, **kwargs): 114 from tornado import ioloop 115 super(StoppableThread, self).__init__(**kwargs) 116 self._stop_event = threading.Event() 117 self.io_loop = io_loop 118 self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout) 119 self._cb.start() 120 121 def _check_stopped(self): 122 if self.stopped: 123 self._cb.stop() 124 self.io_loop.stop() 125 126 def stop(self): 127 self._stop_event.set() 128 129 @property 130 def stopped(self): 131 return self._stop_event.is_set() 132 [end of panel/io/server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/panel/io/server.py b/panel/io/server.py --- a/panel/io/server.py +++ b/panel/io/server.py @@ -122,6 +122,15 @@ if self.stopped: self._cb.stop() self.io_loop.stop() + + def run(self): + try: + if self._target: + bokeh_server = self._target(*self._args, **self._kwargs) + finally: + if isinstance(bokeh_server, Server): + bokeh_server.stop() + del self._target, self._args, self._kwargs def stop(self): self._stop_event.set()
{"golden_diff": "diff --git a/panel/io/server.py b/panel/io/server.py\n--- a/panel/io/server.py\n+++ b/panel/io/server.py\n@@ -122,6 +122,15 @@\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n+ \n+ def run(self):\n+ try:\n+ if self._target:\n+ bokeh_server = self._target(*self._args, **self._kwargs)\n+ finally:\n+ if isinstance(bokeh_server, Server):\n+ bokeh_server.stop()\n+ del self._target, self._args, self._kwargs\n \n def stop(self):\n self._stop_event.set()\n", "issue": "Port not being released after stopping threaded holoviz panel app server\nClosing a threaded panel app holds on to the port it started on. This is different behavior than closing an app initialized without threading.\r\n\r\n```\r\nusgs_logo = pn.panel('../assets/usgs_logo.png', height=130)\r\n\r\ncolumn = pn.Column(usgs_logo)\r\n\r\napp = column.show(port=8889)\r\n\r\napp.stop()\r\n```\r\n\r\nPort 8889 is released.\r\n\r\n```\r\napp = row.show(port=8889, threaded=True)\r\n\r\napp.stop()\r\n```\r\nPort 8889 is not released.\n", "before_files": [{"content": "\"\"\"\nUtilities for creating bokeh Server instances.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport signal\nimport threading\n\nfrom functools import partial\n\nfrom bokeh.server.server import Server\n\nfrom .state import state\n\n\n#---------------------------------------------------------------------\n# Private API\n#---------------------------------------------------------------------\n\ndef _origin_url(url):\n if url.startswith(\"http\"):\n url = url.split(\"//\")[1]\n return url\n\n\ndef _server_url(url, port):\n if url.startswith(\"http\"):\n return '%s:%d%s' % (url.rsplit(':', 1)[0], port, \"/\")\n else:\n return 'http://%s:%d%s' % (url.split(':')[0], port, \"/\")\n\n#---------------------------------------------------------------------\n# Public API\n#---------------------------------------------------------------------\n\ndef get_server(panel, port=0, websocket_origin=None, loop=None,\n show=False, start=False, **kwargs):\n \"\"\"\n Returns a Server instance with this panel attached as the root\n app.\n\n Arguments\n ---------\n port: int (optional, default=0)\n Allows specifying a specific port\n websocket_origin: str or list(str) (optional)\n A list of hosts that can connect to the websocket.\n\n This is typically required when embedding a server app in\n an external web site.\n\n If None, \"localhost\" is used.\n loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())\n The tornado IOLoop to run the Server on\n show : boolean (optional, default=False)\n Whether to open the server in a new browser tab on start\n start : boolean(optional, default=False)\n Whether to start the Server\n kwargs: dict\n Additional keyword arguments to pass to Server instance\n\n Returns\n -------\n server : bokeh.server.server.Server\n Bokeh Server instance running this panel\n \"\"\"\n from tornado.ioloop import IOLoop\n opts = dict(kwargs)\n if loop:\n loop.make_current()\n opts['io_loop'] = loop\n else:\n opts['io_loop'] = IOLoop.current()\n\n if websocket_origin:\n if not isinstance(websocket_origin, list):\n websocket_origin = [websocket_origin]\n opts['allow_websocket_origin'] = websocket_origin\n\n server_id = kwargs.pop('server_id', None)\n server = Server({'/': partial(panel._modify_doc, server_id)}, port=port, **opts)\n if server_id:\n state._servers[server_id] = (server, panel, [])\n\n if show:\n def show_callback():\n server.show('/')\n server.io_loop.add_callback(show_callback)\n\n def sig_exit(*args, 
**kwargs):\n server.io_loop.add_callback_from_signal(do_stop)\n\n def do_stop(*args, **kwargs):\n server.io_loop.stop()\n\n try:\n signal.signal(signal.SIGINT, sig_exit)\n except ValueError:\n pass # Can't use signal on a thread\n\n if start:\n server.start()\n try:\n server.io_loop.start()\n except RuntimeError:\n pass\n return server\n\n\nclass StoppableThread(threading.Thread):\n \"\"\"Thread class with a stop() method.\"\"\"\n\n def __init__(self, io_loop=None, timeout=1000, **kwargs):\n from tornado import ioloop\n super(StoppableThread, self).__init__(**kwargs)\n self._stop_event = threading.Event()\n self.io_loop = io_loop\n self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)\n self._cb.start()\n\n def _check_stopped(self):\n if self.stopped:\n self._cb.stop()\n self.io_loop.stop()\n\n def stop(self):\n self._stop_event.set()\n\n @property\n def stopped(self):\n return self._stop_event.is_set()\n", "path": "panel/io/server.py"}]}
1,778
153
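The panel patch above makes the returned `StoppableThread` stop its Bokeh `Server` when the thread winds down, which is what frees the port; a short sketch of the usage pattern from the issue, with the post-patch expectation noted in comments (the second `show()` call is illustrative, not from the row):

```
# Usage pattern from the issue above; comments describe the behaviour after the patch.
import panel as pn

column = pn.Column("hello")

app = column.show(port=8889, threaded=True)  # StoppableThread wrapping a bokeh Server
app.stop()                                   # sets the stop event; the io_loop is stopped
app.join()                                   # run()'s finally block now stops the Server too

# Port 8889 should be released again, so reusing it is expected to work:
# app2 = column.show(port=8889)
```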
gh_patches_debug_5160
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-5342
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of extensions/cpp_extension.py] 1 import importlib 2 import os 3 import time 4 from abc import abstractmethod 5 from pathlib import Path 6 from typing import List 7 8 from .base_extension import _Extension 9 10 __all__ = ["_CppExtension"] 11 12 13 class _CppExtension(_Extension): 14 def __init__(self, name: str, priority: int = 1): 15 super().__init__(name, support_aot=True, support_jit=True, priority=priority) 16 17 # we store the op as an attribute to avoid repeated building and loading 18 self.cached_op = None 19 20 # build-related variables 21 self.prebuilt_module_path = "colossalai._C" 22 self.prebuilt_import_path = f"{self.prebuilt_module_path}.{self.name}" 23 self.version_dependent_macros = ["-DVERSION_GE_1_1", "-DVERSION_GE_1_3", "-DVERSION_GE_1_5"] 24 25 def csrc_abs_path(self, path): 26 return os.path.join(self.relative_to_abs_path("csrc"), path) 27 28 def relative_to_abs_path(self, code_path: str) -> str: 29 """ 30 This function takes in a path relative to the colossalai root directory and return the absolute path. 31 """ 32 33 # get the current file path 34 # iteratively check the parent directory 35 # if the parent directory is "extensions", then the current file path is the root directory 36 # otherwise, the current file path is inside the root directory 37 current_file_path = Path(__file__) 38 while True: 39 if current_file_path.name == "extensions": 40 break 41 else: 42 current_file_path = current_file_path.parent 43 extension_module_path = current_file_path 44 code_abs_path = extension_module_path.joinpath(code_path) 45 return str(code_abs_path) 46 47 # functions must be overrided over 48 def strip_empty_entries(self, args): 49 """ 50 Drop any empty strings from the list of compile and link flags 51 """ 52 return [x for x in args if len(x) > 0] 53 54 def import_op(self): 55 """ 56 This function will import the op module by its string name. 
57 """ 58 return importlib.import_module(self.prebuilt_import_path) 59 60 def build_aot(self) -> "CppExtension": 61 from torch.utils.cpp_extension import CppExtension 62 63 return CppExtension( 64 name=self.prebuilt_import_path, 65 sources=self.strip_empty_entries(self.sources_files()), 66 include_dirs=self.strip_empty_entries(self.include_dirs()), 67 extra_compile_args=self.strip_empty_entries(self.cxx_flags()), 68 ) 69 70 def build_jit(self) -> None: 71 from torch.utils.cpp_extension import load 72 73 build_directory = _Extension.get_jit_extension_folder_path() 74 build_directory = Path(build_directory) 75 build_directory.mkdir(parents=True, exist_ok=True) 76 77 # check if the kernel has been built 78 compiled_before = False 79 kernel_file_path = build_directory.joinpath(f"{self.name}.o") 80 if kernel_file_path.exists(): 81 compiled_before = True 82 83 # load the kernel 84 if compiled_before: 85 print(f"[extension] Loading the JIT-built {self.name} kernel during runtime now") 86 else: 87 print(f"[extension] Compiling the JIT {self.name} kernel during runtime now") 88 89 build_start = time.time() 90 op_kernel = load( 91 name=self.name, 92 sources=self.strip_empty_entries(self.sources_files()), 93 extra_include_paths=self.strip_empty_entries(self.include_dirs()), 94 extra_cflags=self.cxx_flags(), 95 extra_ldflags=[], 96 build_directory=str(build_directory), 97 ) 98 build_duration = time.time() - build_start 99 100 if compiled_before: 101 print(f"[extension] Time taken to load {self.name} op: {build_duration} seconds") 102 else: 103 print(f"[extension] Time taken to compile {self.name} op: {build_duration} seconds") 104 105 return op_kernel 106 107 # functions must be overrided begin 108 @abstractmethod 109 def sources_files(self) -> List[str]: 110 """ 111 This function should return a list of source files for extensions. 112 """ 113 114 @abstractmethod 115 def include_dirs(self) -> List[str]: 116 """ 117 This function should return a list of include files for extensions. 118 """ 119 120 @abstractmethod 121 def cxx_flags(self) -> List[str]: 122 """ 123 This function should return a list of cxx compilation flags for extensions. 124 """ 125 126 def load(self): 127 try: 128 op_kernel = self.import_op() 129 except ImportError: 130 # if import error occurs, it means that the kernel is not pre-built 131 # so we build it jit 132 op_kernel = self.build_jit() 133 134 return op_kernel 135 [end of extensions/cpp_extension.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/extensions/cpp_extension.py b/extensions/cpp_extension.py --- a/extensions/cpp_extension.py +++ b/extensions/cpp_extension.py @@ -126,7 +126,7 @@ def load(self): try: op_kernel = self.import_op() - except ImportError: + except (ImportError, ModuleNotFoundError): # if import error occurs, it means that the kernel is not pre-built # so we build it jit op_kernel = self.build_jit()
{"golden_diff": "diff --git a/extensions/cpp_extension.py b/extensions/cpp_extension.py\n--- a/extensions/cpp_extension.py\n+++ b/extensions/cpp_extension.py\n@@ -126,7 +126,7 @@\n def load(self):\n try:\n op_kernel = self.import_op()\n- except ImportError:\n+ except (ImportError, ModuleNotFoundError):\n # if import error occurs, it means that the kernel is not pre-built\n # so we build it jit\n op_kernel = self.build_jit()\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import importlib\nimport os\nimport time\nfrom abc import abstractmethod\nfrom pathlib import Path\nfrom typing import List\n\nfrom .base_extension import _Extension\n\n__all__ = [\"_CppExtension\"]\n\n\nclass _CppExtension(_Extension):\n def __init__(self, name: str, priority: int = 1):\n super().__init__(name, support_aot=True, support_jit=True, priority=priority)\n\n # we store the op as an attribute to avoid repeated building and loading\n self.cached_op = None\n\n # build-related variables\n self.prebuilt_module_path = \"colossalai._C\"\n self.prebuilt_import_path = f\"{self.prebuilt_module_path}.{self.name}\"\n self.version_dependent_macros = [\"-DVERSION_GE_1_1\", \"-DVERSION_GE_1_3\", \"-DVERSION_GE_1_5\"]\n\n def csrc_abs_path(self, path):\n return os.path.join(self.relative_to_abs_path(\"csrc\"), path)\n\n def relative_to_abs_path(self, code_path: str) -> str:\n \"\"\"\n This function takes in a path relative to the colossalai root directory and return the absolute path.\n \"\"\"\n\n # get the current file path\n # iteratively check the parent directory\n # if the parent directory is \"extensions\", then the current file path is the root directory\n # otherwise, the current file path is inside the root directory\n current_file_path = Path(__file__)\n while True:\n if current_file_path.name == \"extensions\":\n break\n else:\n current_file_path = current_file_path.parent\n extension_module_path = current_file_path\n code_abs_path = extension_module_path.joinpath(code_path)\n return str(code_abs_path)\n\n # functions must be overrided over\n def strip_empty_entries(self, args):\n \"\"\"\n Drop any empty strings from the list of compile and link flags\n \"\"\"\n return [x for x in args if len(x) > 0]\n\n def import_op(self):\n \"\"\"\n This function will import the op module by its string name.\n \"\"\"\n return importlib.import_module(self.prebuilt_import_path)\n\n def build_aot(self) -> \"CppExtension\":\n from torch.utils.cpp_extension import CppExtension\n\n return CppExtension(\n name=self.prebuilt_import_path,\n sources=self.strip_empty_entries(self.sources_files()),\n include_dirs=self.strip_empty_entries(self.include_dirs()),\n extra_compile_args=self.strip_empty_entries(self.cxx_flags()),\n )\n\n def build_jit(self) -> None:\n from torch.utils.cpp_extension import load\n\n build_directory = _Extension.get_jit_extension_folder_path()\n build_directory = Path(build_directory)\n build_directory.mkdir(parents=True, exist_ok=True)\n\n # check if the kernel has been built\n compiled_before = False\n kernel_file_path = build_directory.joinpath(f\"{self.name}.o\")\n if kernel_file_path.exists():\n compiled_before = True\n\n # load the kernel\n if compiled_before:\n print(f\"[extension] Loading the JIT-built {self.name} kernel during runtime now\")\n else:\n print(f\"[extension] Compiling the JIT {self.name} kernel during runtime now\")\n\n build_start = time.time()\n op_kernel = load(\n name=self.name,\n 
sources=self.strip_empty_entries(self.sources_files()),\n extra_include_paths=self.strip_empty_entries(self.include_dirs()),\n extra_cflags=self.cxx_flags(),\n extra_ldflags=[],\n build_directory=str(build_directory),\n )\n build_duration = time.time() - build_start\n\n if compiled_before:\n print(f\"[extension] Time taken to load {self.name} op: {build_duration} seconds\")\n else:\n print(f\"[extension] Time taken to compile {self.name} op: {build_duration} seconds\")\n\n return op_kernel\n\n # functions must be overrided begin\n @abstractmethod\n def sources_files(self) -> List[str]:\n \"\"\"\n This function should return a list of source files for extensions.\n \"\"\"\n\n @abstractmethod\n def include_dirs(self) -> List[str]:\n \"\"\"\n This function should return a list of include files for extensions.\n \"\"\"\n\n @abstractmethod\n def cxx_flags(self) -> List[str]:\n \"\"\"\n This function should return a list of cxx compilation flags for extensions.\n \"\"\"\n\n def load(self):\n try:\n op_kernel = self.import_op()\n except ImportError:\n # if import error occurs, it means that the kernel is not pre-built\n # so we build it jit\n op_kernel = self.build_jit()\n\n return op_kernel\n", "path": "extensions/cpp_extension.py"}]}
1,858
108
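The ColossalAI patch above widens `except ImportError` to also list `ModuleNotFoundError`; a tiny standalone check (plain Python, nothing ColossalAI-specific) showing how the two exception types relate, which helps when reading that diff:

```
# ModuleNotFoundError subclasses ImportError on Python 3.6+, so naming both in the
# except clause is explicit/defensive rather than strictly required for missing modules.
print(issubclass(ModuleNotFoundError, ImportError))  # True

try:
    import hopefully_not_installed_module  # hypothetical missing module name
except (ImportError, ModuleNotFoundError) as err:
    print(type(err).__name__)  # ModuleNotFoundError
```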
gh_patches_debug_29779
rasdani/github-patches
git_diff
e-valuation__EvaP-597
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add "public view" on results pages for staff users For staff users there should be an option on the results pages to switch between "public view" (without comments, like students would see the page) and "complete view" (the current view including all comments). </issue> <code> [start of evap/results/views.py] 1 from django.http import HttpResponse 2 from django.core.exceptions import PermissionDenied 3 from django.shortcuts import get_object_or_404, render 4 from django.utils.translation import get_language 5 from django.contrib.auth.decorators import login_required 6 7 from evap.evaluation.auth import staff_required 8 from evap.evaluation.models import Semester, Degree 9 from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult 10 11 from evap.results.exporters import ExcelExporter 12 13 from collections import OrderedDict, namedtuple 14 15 16 @login_required 17 def index(request): 18 semesters = Semester.get_all_with_published_courses() 19 20 return render(request, "results_index.html", dict(semesters=semesters)) 21 22 23 @login_required 24 def semester_detail(request, semester_id): 25 semester = get_object_or_404(Semester, id=semester_id) 26 courses = list(semester.course_set.filter(state="published").prefetch_related("degrees")) 27 28 # annotate each course object with its grades 29 for course in courses: 30 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course) 31 32 CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results')) 33 34 courses_by_degree = OrderedDict() 35 for degree in Degree.objects.all(): 36 courses_by_degree[degree] = CourseTuple([], []) 37 for course in courses: 38 if course.is_single_result(): 39 for degree in course.degrees.all(): 40 section = calculate_results(course)[0] 41 result = section.results[0] 42 courses_by_degree[degree].single_results.append((course, result)) 43 else: 44 for degree in course.degrees.all(): 45 courses_by_degree[degree].courses.append(course) 46 47 template_data = dict(semester=semester, courses_by_degree=courses_by_degree, staff=request.user.is_staff) 48 return render(request, "results_semester_detail.html", template_data) 49 50 51 @staff_required 52 def semester_export(request, semester_id): 53 semester = get_object_or_404(Semester, id=semester_id) 54 55 filename = "Evaluation-%s-%s.xls" % (semester.name, get_language()) 56 57 response = HttpResponse(content_type="application/vnd.ms-excel") 58 response["Content-Disposition"] = "attachment; filename=\"%s\"" % filename 59 60 ExcelExporter(semester).export(response, 'all' in request.GET) 61 62 return response 63 64 65 @login_required 66 def course_detail(request, semester_id, course_id): 67 semester = get_object_or_404(Semester, id=semester_id) 68 course = get_object_or_404(semester.course_set, id=course_id) 69 70 if not course.can_user_see_results(request.user): 71 raise PermissionDenied 72 73 sections = calculate_results(course) 74 75 for section in sections: 76 results = [] 77 for result in section.results: 78 if isinstance(result, TextResult): 79 answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, answer)] 80 if answers: 81 results.append(TextResult(question=result.question, answers=answers)) 82 else: 83 results.append(result) 84 section.results[:] = results 85 86 # filter empty sections and group by contributor 87 course_sections = [] 88 
contributor_sections = OrderedDict() 89 for section in sections: 90 if not section.results: 91 continue 92 if section.contributor is None: 93 course_sections.append(section) 94 else: 95 contributor_sections.setdefault(section.contributor, []).append(section) 96 97 # show a warning if course is still in evaluation (for staff preview) 98 evaluation_warning = course.state != 'published' 99 100 # results for a course might not be visible because there are not enough answers 101 # but it can still be "published" e.g. to show the comment results to contributors. 102 # users who can open the results page see a warning message in this case 103 sufficient_votes_warning = not course.can_publish_grades 104 105 show_grades = request.user.is_staff or course.can_publish_grades 106 107 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course) 108 109 template_data = dict( 110 course=course, 111 course_sections=course_sections, 112 contributor_sections=contributor_sections, 113 evaluation_warning=evaluation_warning, 114 sufficient_votes_warning=sufficient_votes_warning, 115 show_grades=show_grades, 116 staff=request.user.is_staff) 117 return render(request, "results_course_detail.html", template_data) 118 119 def user_can_see_text_answer(user, text_answer): 120 if user.is_staff: 121 return True 122 contributor = text_answer.contribution.contributor 123 if text_answer.is_private: 124 return contributor == user 125 if text_answer.is_published: 126 if contributor == user or contributor in user.represented_users.all(): 127 return True 128 if text_answer.contribution.course.is_user_responsible_or_delegate(user): 129 return True 130 131 return False 132 [end of evap/results/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/results/views.py b/evap/results/views.py --- a/evap/results/views.py +++ b/evap/results/views.py @@ -72,11 +72,14 @@ sections = calculate_results(course) + public_view = request.GET.get('public_view', 'false') # default: show own view + public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean + for section in sections: results = [] for result in section.results: if isinstance(result, TextResult): - answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, answer)] + answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, answer, public_view)] if answers: results.append(TextResult(question=result.question, answers=answers)) else: @@ -113,10 +116,14 @@ evaluation_warning=evaluation_warning, sufficient_votes_warning=sufficient_votes_warning, show_grades=show_grades, - staff=request.user.is_staff) + staff=request.user.is_staff, + contributor=course.is_user_contributor_or_delegate(request.user), + public_view=public_view) return render(request, "results_course_detail.html", template_data) -def user_can_see_text_answer(user, text_answer): +def user_can_see_text_answer(user, text_answer, public_view=False): + if public_view: + return False if user.is_staff: return True contributor = text_answer.contribution.contributor
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -72,11 +72,14 @@\n \n sections = calculate_results(course)\n \n+ public_view = request.GET.get('public_view', 'false') # default: show own view\n+ public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean\n+\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n- answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, answer)]\n+ answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n@@ -113,10 +116,14 @@\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n- staff=request.user.is_staff)\n+ staff=request.user.is_staff,\n+ contributor=course.is_user_contributor_or_delegate(request.user),\n+ public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n \n-def user_can_see_text_answer(user, text_answer):\n+def user_can_see_text_answer(user, text_answer, public_view=False):\n+ if public_view:\n+ return False\n if user.is_staff:\n return True\n contributor = text_answer.contribution.contributor\n", "issue": "Add \"public view\" on results pages for staff users\nFor staff users there should be an option on the results pages to switch between \"public view\" (without comments, like students would see the page) and \"complete view\" (the current view including all comments).\n\n", "before_files": [{"content": "from django.http import HttpResponse\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.utils.translation import get_language\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.auth import staff_required\nfrom evap.evaluation.models import Semester, Degree\nfrom evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult\n\nfrom evap.results.exporters import ExcelExporter\n\nfrom collections import OrderedDict, namedtuple\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n # annotate each course object with its grades\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result():\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree, staff=request.user.is_staff)\n return render(request, 
\"results_semester_detail.html\", template_data)\n\n\n@staff_required\ndef semester_export(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n\n filename = \"Evaluation-%s-%s.xls\" % (semester.name, get_language())\n\n response = HttpResponse(content_type=\"application/vnd.ms-excel\")\n response[\"Content-Disposition\"] = \"attachment; filename=\\\"%s\\\"\" % filename\n\n ExcelExporter(semester).export(response, 'all' in request.GET)\n\n return response\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, answer)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # filter empty sections and group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor, []).append(section)\n\n # show a warning if course is still in evaluation (for staff preview)\n evaluation_warning = course.state != 'published'\n\n # results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # users who can open the results page see a warning message in this case\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_staff or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n staff=request.user.is_staff)\n return render(request, \"results_course_detail.html\", template_data)\n\ndef user_can_see_text_answer(user, text_answer):\n if user.is_staff:\n return True\n contributor = text_answer.contribution.contributor\n if text_answer.is_private:\n return contributor == user\n if text_answer.is_published:\n if contributor == user or contributor in user.represented_users.all():\n return True\n if text_answer.contribution.course.is_user_responsible_or_delegate(user):\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
1,910
357
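The EvaP patch above switches the results page between the complete and the public view with a `public_view` query parameter; a small standalone sketch of that parameter handling (the helper function is illustrative, only the parameter name, its default and the true/false parsing mirror the diff):

```
# Standalone sketch of the query-parameter handling added in the patch above;
# not the actual Django view, just the same string-to-bool convention.
def is_public_view(get_params):
    raw = get_params.get("public_view", "false")        # default: show own/complete view
    return {"true": True, "false": False}.get(raw.lower())

print(is_public_view({}))                        # False -> complete view, comments filtered per permissions
print(is_public_view({"public_view": "true"}))   # True  -> public view
```

In the patched `user_can_see_text_answer`, a true `public_view` flag short-circuits to `False`, which is what hides every comment in the public view.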
gh_patches_debug_17589
rasdani/github-patches
git_diff
coala__coala-bears-1661
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> JuliaLintBear: Use JuliaRequirement `REQUIREMENTS` should be a `JuliaRequirement` instead of a `DistributionRequirement`. </issue> <code> [start of bears/julia/JuliaLintBear.py] 1 from coala_utils.string_processing.Core import escape 2 from coalib.bearlib.abstractions.Linter import linter 3 from dependency_management.requirements.DistributionRequirement import ( 4 DistributionRequirement) 5 from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY 6 7 8 @linter(executable='julia', 9 output_format='regex', 10 output_regex=r'.+:(?P<line>\d+) (?P<severity>.)\d+ (?P<message>.*)', 11 severity_map={'E': RESULT_SEVERITY.MAJOR, 12 'W': RESULT_SEVERITY.NORMAL, 13 'I': RESULT_SEVERITY.INFO}, 14 prerequisite_check_command=('julia', '-e', 'import Lint.lintfile'), 15 prerequisite_check_fail_message='Lint package not installed. Run ' 16 '`Pkg.add("Lint")` from Julia to ' 17 'install Lint.') 18 class JuliaLintBear: 19 """ 20 Provide analysis related to common bugs and potential issues in Julia like 21 dead code, undefined variable usage, duplicate keys in dicts, incorrect 22 ADT usage, wrongfully using ellipsis, and much more. 23 24 See <https://lintjl.readthedocs.org/en/stable/> for more information 25 on the analysis provided. 26 """ 27 LANGUAGES = {'Julia'} 28 REQUIREMENTS = {DistributionRequirement(apt_get='julia')} 29 AUTHORS = {'The coala developers'} 30 AUTHORS_EMAILS = {'[email protected]'} 31 LICENSE = 'AGPL-3.0' 32 CAN_DETECT = {'Unused Code', 'Syntax', 'Redundancy', 'Duplication', 33 'Unreachable Code', 'Security', 'Formatting'} 34 35 @staticmethod 36 def create_arguments(filename, file, config_file): 37 lintcode = ('import Lint.lintfile; display(lintfile("' + 38 escape(filename, '"\\') + '"))') 39 return '-e', lintcode 40 [end of bears/julia/JuliaLintBear.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bears/julia/JuliaLintBear.py b/bears/julia/JuliaLintBear.py --- a/bears/julia/JuliaLintBear.py +++ b/bears/julia/JuliaLintBear.py @@ -1,7 +1,7 @@ from coala_utils.string_processing.Core import escape from coalib.bearlib.abstractions.Linter import linter -from dependency_management.requirements.DistributionRequirement import ( - DistributionRequirement) +from dependency_management.requirements.JuliaRequirement import ( + JuliaRequirement) from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY @@ -25,7 +25,7 @@ on the analysis provided. """ LANGUAGES = {'Julia'} - REQUIREMENTS = {DistributionRequirement(apt_get='julia')} + REQUIREMENTS = {JuliaRequirement('Lint')} AUTHORS = {'The coala developers'} AUTHORS_EMAILS = {'[email protected]'} LICENSE = 'AGPL-3.0'
{"golden_diff": "diff --git a/bears/julia/JuliaLintBear.py b/bears/julia/JuliaLintBear.py\n--- a/bears/julia/JuliaLintBear.py\n+++ b/bears/julia/JuliaLintBear.py\n@@ -1,7 +1,7 @@\n from coala_utils.string_processing.Core import escape\n from coalib.bearlib.abstractions.Linter import linter\n-from dependency_management.requirements.DistributionRequirement import (\n- DistributionRequirement)\n+from dependency_management.requirements.JuliaRequirement import (\n+ JuliaRequirement)\n from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\n \n \n@@ -25,7 +25,7 @@\n on the analysis provided.\n \"\"\"\n LANGUAGES = {'Julia'}\n- REQUIREMENTS = {DistributionRequirement(apt_get='julia')}\n+ REQUIREMENTS = {JuliaRequirement('Lint')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n", "issue": "JuliaLintBear: Use JuliaRequirement\n`REQUIREMENTS` should be a `JuliaRequirement` instead of a `DistributionRequirement`.\n", "before_files": [{"content": "from coala_utils.string_processing.Core import escape\nfrom coalib.bearlib.abstractions.Linter import linter\nfrom dependency_management.requirements.DistributionRequirement import (\n DistributionRequirement)\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\n\n\n@linter(executable='julia',\n output_format='regex',\n output_regex=r'.+:(?P<line>\\d+) (?P<severity>.)\\d+ (?P<message>.*)',\n severity_map={'E': RESULT_SEVERITY.MAJOR,\n 'W': RESULT_SEVERITY.NORMAL,\n 'I': RESULT_SEVERITY.INFO},\n prerequisite_check_command=('julia', '-e', 'import Lint.lintfile'),\n prerequisite_check_fail_message='Lint package not installed. Run '\n '`Pkg.add(\"Lint\")` from Julia to '\n 'install Lint.')\nclass JuliaLintBear:\n \"\"\"\n Provide analysis related to common bugs and potential issues in Julia like\n dead code, undefined variable usage, duplicate keys in dicts, incorrect\n ADT usage, wrongfully using ellipsis, and much more.\n\n See <https://lintjl.readthedocs.org/en/stable/> for more information\n on the analysis provided.\n \"\"\"\n LANGUAGES = {'Julia'}\n REQUIREMENTS = {DistributionRequirement(apt_get='julia')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_DETECT = {'Unused Code', 'Syntax', 'Redundancy', 'Duplication',\n 'Unreachable Code', 'Security', 'Formatting'}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n lintcode = ('import Lint.lintfile; display(lintfile(\"' +\n escape(filename, '\"\\\\') + '\"))')\n return '-e', lintcode\n", "path": "bears/julia/JuliaLintBear.py"}]}
1,048
224
gh_patches_debug_35924
rasdani/github-patches
git_diff
archlinux__archinstall-731
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Nvidia proprietary driver installation Hello I tried to install my system via this installer but i can't install nvidia proprietary driver with this installer. I think there is an option "Nvidia (proprietary)" but it still installing open source nvidia driver. https://github.com/archlinux/archinstall/blob/0071a069080732047e11309134869f3ab40c642c/archinstall/lib/hardware.py How can install proprietary drivers ? Instead of gfx_driver option adding nvidia package names to packages option like that : ```"packages": ["bash-completion","linux-headers","nvidia","nvidia-settings","opencl-nvidia"]``` My config file : ``` { "bootloader": "systemd-bootctl", "audio": "pulseaudio", "filesystem": "ext4", "gfx_driver": "Nvidia (proprietary)", "harddrive": { "path": "/dev/sda" }, "hostname": "pc", "kernels": ["linux"], "keyboard-language": "trq", "mirror-region": {}, "nic": { "NetworkManager": true }, "profile": "kde", "ntp": true, "packages": ["bash-completion", "linux-headers"], "sys-encoding": "utf-8", "timezone": "UTC", "!root-password": "****", "superusers": { "kaan": { "!password": "****" } } } ``` Thank you. </issue> <code> [start of profiles/xorg.py] 1 # A system with "xorg" installed 2 3 import archinstall 4 5 is_top_level_profile = True 6 7 __description__ = 'Installs a minimal system as well as xorg and graphics drivers.' 8 9 __packages__ = [ 10 'dkms', 11 'xorg-server', 12 'xorg-xinit', 13 'nvidia-dkms', 14 *archinstall.lib.hardware.__packages__, 15 ] 16 17 18 def _prep_function(*args, **kwargs): 19 """ 20 Magic function called by the importing installer 21 before continuing any further. It also avoids executing any 22 other code in this stage. So it's a safe way to ask the user 23 for more input before any other installer steps start. 24 """ 25 26 archinstall.storage["gfx_driver_packages"] = archinstall.select_driver() 27 28 # TODO: Add language section and/or merge it with the locale selected 29 # earlier in for instance guided.py installer. 
30 31 return True 32 33 34 # Ensures that this code only gets executed if executed 35 # through importlib.util.spec_from_file_location("xorg", "/somewhere/xorg.py") 36 # or through conventional import xorg 37 if __name__ == 'xorg': 38 try: 39 if "nvidia" in archinstall.storage.get("gfx_driver_packages", None): 40 if "linux-zen" in archinstall.storage['installation_session'].base_packages or "linux-lts" in archinstall.storage['installation_session'].base_packages: 41 for kernel in archinstall.storage['installation_session'].kernels: 42 archinstall.storage['installation_session'].add_additional_packages(f"{kernel}-headers") # Fixes https://github.com/archlinux/archinstall/issues/585 43 archinstall.storage['installation_session'].add_additional_packages("dkms") # I've had kernel regen fail if it wasn't installed before nvidia-dkms 44 archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit nvidia-dkms") 45 else: 46 archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', None))}") 47 else: 48 archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', None))}") 49 except: 50 archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit") # Prep didn't run, so there's no driver to install 51 [end of profiles/xorg.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/profiles/xorg.py b/profiles/xorg.py --- a/profiles/xorg.py +++ b/profiles/xorg.py @@ -1,6 +1,7 @@ # A system with "xorg" installed import archinstall +import logging is_top_level_profile = True @@ -36,15 +37,16 @@ # or through conventional import xorg if __name__ == 'xorg': try: - if "nvidia" in archinstall.storage.get("gfx_driver_packages", None): + if "nvidia" in archinstall.storage.get("gfx_driver_packages", []): if "linux-zen" in archinstall.storage['installation_session'].base_packages or "linux-lts" in archinstall.storage['installation_session'].base_packages: for kernel in archinstall.storage['installation_session'].kernels: archinstall.storage['installation_session'].add_additional_packages(f"{kernel}-headers") # Fixes https://github.com/archlinux/archinstall/issues/585 archinstall.storage['installation_session'].add_additional_packages("dkms") # I've had kernel regen fail if it wasn't installed before nvidia-dkms archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit nvidia-dkms") else: - archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', None))}") + archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}") else: - archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', None))}") - except: + archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}") + except Exception as err: + archinstall.log(f"Could not handle nvidia and linuz-zen specific situations during xorg installation: {err}", level=logging.WARNING, fg="yellow") archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit") # Prep didn't run, so there's no driver to install
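The patch above amounts to two hardening changes: default the lookup to `[]` instead of `None`, and log the exception instead of swallowing it with a bare `except`. A minimal sketch of the failure mode this guards against — plain Python, with an ordinary dict standing in for `archinstall.storage`, so the key names and messages are illustrative only:

```python
# Sketch of the silent fallback the diff above removes.
storage = {}  # gfx_driver_packages never got set (e.g. the prep step did not run)

try:
    # Old behaviour: a default of None makes the membership test raise...
    if "nvidia" in storage.get("gfx_driver_packages", None):
        print("would install nvidia-dkms")
except TypeError as err:
    # ...and the bare `except:` hid the error, so only plain xorg was installed.
    print("silently swallowed:", err)

# Patched behaviour: an empty-list default keeps the test valid, and any other
# failure is now logged as a warning instead of disappearing.
if "nvidia" in storage.get("gfx_driver_packages", []):
    print("would install nvidia-dkms")
else:
    print("no driver selected; fall back to xorg-server xorg-xinit")
```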
{"golden_diff": "diff --git a/profiles/xorg.py b/profiles/xorg.py\n--- a/profiles/xorg.py\n+++ b/profiles/xorg.py\n@@ -1,6 +1,7 @@\n # A system with \"xorg\" installed\n \n import archinstall\n+import logging\n \n is_top_level_profile = True\n \n@@ -36,15 +37,16 @@\n # or through conventional import xorg\n if __name__ == 'xorg':\n \ttry:\n-\t\tif \"nvidia\" in archinstall.storage.get(\"gfx_driver_packages\", None):\n+\t\tif \"nvidia\" in archinstall.storage.get(\"gfx_driver_packages\", []):\n \t\t\tif \"linux-zen\" in archinstall.storage['installation_session'].base_packages or \"linux-lts\" in archinstall.storage['installation_session'].base_packages:\n \t\t\t\tfor kernel in archinstall.storage['installation_session'].kernels:\n \t\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"{kernel}-headers\") # Fixes https://github.com/archlinux/archinstall/issues/585\n \t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"dkms\") # I've had kernel regen fail if it wasn't installed before nvidia-dkms\n \t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit nvidia-dkms\")\n \t\t\telse:\n-\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', None))}\")\n+\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n \t\telse:\n-\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', None))}\")\n-\texcept:\n+\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n+\texcept Exception as err:\n+\t\tarchinstall.log(f\"Could not handle nvidia and linuz-zen specific situations during xorg installation: {err}\", level=logging.WARNING, fg=\"yellow\")\n \t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit\") # Prep didn't run, so there's no driver to install\n", "issue": "Nvidia proprietary driver installation\nHello \r\n\r\nI tried to install my system via this installer but i can't install nvidia proprietary driver with this installer.\r\n\r\nI think there is an option \"Nvidia (proprietary)\" but it still installing open source nvidia driver.\r\nhttps://github.com/archlinux/archinstall/blob/0071a069080732047e11309134869f3ab40c642c/archinstall/lib/hardware.py\r\n\r\nHow can install proprietary drivers ?\r\nInstead of gfx_driver option adding nvidia package names to packages option like that :\r\n```\"packages\": [\"bash-completion\",\"linux-headers\",\"nvidia\",\"nvidia-settings\",\"opencl-nvidia\"]```\r\n\r\n\r\n\r\nMy config file :\r\n\r\n```\r\n{\r\n \"bootloader\": \"systemd-bootctl\",\r\n \"audio\": \"pulseaudio\",\r\n \"filesystem\": \"ext4\",\r\n \"gfx_driver\": \"Nvidia (proprietary)\",\r\n \"harddrive\": {\r\n \"path\": \"/dev/sda\"\r\n },\r\n \"hostname\": \"pc\",\r\n \"kernels\": [\"linux\"],\r\n \"keyboard-language\": \"trq\",\r\n \"mirror-region\": {},\r\n \"nic\": {\r\n \"NetworkManager\": true\r\n },\r\n \"profile\": \"kde\",\r\n \"ntp\": true,\r\n \"packages\": [\"bash-completion\", \"linux-headers\"],\r\n \"sys-encoding\": \"utf-8\",\r\n \"timezone\": \"UTC\",\r\n \"!root-password\": \"****\",\r\n \"superusers\": {\r\n \"kaan\": {\r\n 
\"!password\": \"****\"\r\n }\r\n }\r\n}\r\n```\r\n\r\nThank you.\r\n\n", "before_files": [{"content": "# A system with \"xorg\" installed\n\nimport archinstall\n\nis_top_level_profile = True\n\n__description__ = 'Installs a minimal system as well as xorg and graphics drivers.'\n\n__packages__ = [\n\t'dkms',\n\t'xorg-server',\n\t'xorg-xinit',\n\t'nvidia-dkms',\n\t*archinstall.lib.hardware.__packages__,\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tarchinstall.storage[\"gfx_driver_packages\"] = archinstall.select_driver()\n\n\t# TODO: Add language section and/or merge it with the locale selected\n\t# earlier in for instance guided.py installer.\n\n\treturn True\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"xorg\", \"/somewhere/xorg.py\")\n# or through conventional import xorg\nif __name__ == 'xorg':\n\ttry:\n\t\tif \"nvidia\" in archinstall.storage.get(\"gfx_driver_packages\", None):\n\t\t\tif \"linux-zen\" in archinstall.storage['installation_session'].base_packages or \"linux-lts\" in archinstall.storage['installation_session'].base_packages:\n\t\t\t\tfor kernel in archinstall.storage['installation_session'].kernels:\n\t\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"{kernel}-headers\") # Fixes https://github.com/archlinux/archinstall/issues/585\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"dkms\") # I've had kernel regen fail if it wasn't installed before nvidia-dkms\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit nvidia-dkms\")\n\t\t\telse:\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', None))}\")\n\t\telse:\n\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', None))}\")\n\texcept:\n\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit\") # Prep didn't run, so there's no driver to install\n", "path": "profiles/xorg.py"}]}
1,525
520
gh_patches_debug_5418
rasdani/github-patches
git_diff
saulpw__visidata-625
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [help keystrokes] Help keystrokes has longnames for about 20 commands **Small description** Keystrokes in HelpSheet display commands not keystrokes for about 20 commands. **Expected result** Keystroke values **Actual result with screenshot** sheet longname keystrokes TableSheet go-next-selected next-selected TableSheet go-prev-selected prev-selected TableSheet setcol-clipboard paste-cells TableSheet search-prev prev-search TableSheet setcol-input edit-cells TableSheet setcol-fill fill-nulls TableSheet go-next-value next-value TableSheet go-right-page page-right BaseSheet jump-prev prev-sheet TableSheet go-prev-value prev-value BaseSheet open-new add-sheet TableSheet go-next-null next-null TableSheet go-left-page page-left TableSheet go-prev-null prev-null TableSheet open-row dive-row DirSheet open-row dive-row BaseSheet open-statuses statuses Canvas zoomout-mouse 2097152 BaseSheet show-status status **Steps to reproduce with sample data and a .vd** Visit the help sheet, and look for the long commands above. **Additional context** Please include the version of VisiData. Latest from develop </issue> <code> [start of visidata/help.py] 1 from visidata import * 2 3 4 class HelpSheet(MetaSheet): 5 'Show all commands available to the source sheet.' 6 rowtype = 'commands' 7 precious = False 8 _ordering = [('sheet', False), ('longname', False)] 9 10 columns = [ 11 ColumnAttr('sheet'), 12 ColumnAttr('longname'), 13 Column('keystrokes', getter=lambda col,row: col.sheet.revbinds.get(row.longname)), 14 Column('description', getter=lambda col,row: col.sheet.cmddict[(row.sheet, row.longname)].helpstr), 15 ColumnAttr('execstr', width=0), 16 Column('logged', width=0, getter=lambda col,row: isLoggableCommand(row.longname)), 17 ] 18 nKeys = 2 19 20 def iterload(self): 21 from pkg_resources import resource_filename 22 cmdlist = VisiDataMetaSheet('cmdlist', source=None) 23 24 self.cmddict = {} 25 itcmds = vd.commands.iterall() 26 for (k, o), v in itcmds: 27 yield v 28 v.sheet = o 29 self.cmddict[(v.sheet, v.longname)] = v 30 31 for cmdrow in cmdlist.rows: 32 k = (cmdrow.sheet, cmdrow.longname) 33 if k in self.cmddict: 34 self.cmddict[k].helpstr = cmdrow.helpstr 35 36 self.revbinds = {} # [longname] -> keystrokes 37 itbindings = vd.bindkeys.iterall() 38 for (keystrokes, _), longname in itbindings: 39 if keystrokes not in self.revbinds: 40 self.revbinds[longname] = keystrokes 41 42 43 @VisiData.api 44 @asyncthread 45 def help_search(vd, sheet, regex): 46 vs = HelpSheet(source=None) 47 vs.rows = [] # do not trigger push reload 48 vd.push(vs) # push first, then reload 49 vd.sync(vs.reload()) 50 51 # find rows matching regex on original HelpSheet 52 rowidxs = list(vd.searchRegex(vs, regex=regex, columns="visibleCols")) 53 54 # add only matching rows 55 allrows = vs.rows 56 vs.rows = [] 57 for rowidx in rowidxs: 58 vs.addRow(allrows[rowidx]) 59 60 61 @VisiData.global_api 62 def openManPage(vd): 63 from pkg_resources import resource_filename 64 import os 65 with SuspendCurses(): 66 os.system(' '.join(['man', resource_filename(__name__, 'man/vd.1')])) 67 68 69 # in VisiData, ^H refers to the man page 70 globalCommand('^H', 'sysopen-help', 'openManPage()', 'view vd man page') 71 BaseSheet.addCommand('z^H', 'help-commands', 'vd.push(HelpSheet(name + "_commands", source=sheet, revbinds={}))', 'view sheet of command longnames and keybindings for current sheet') 72 BaseSheet.addCommand('gz^H', 'help-commands-all', 
'vd.push(HelpSheet("all_commands", source=None, revbinds={}))', 'view sheet of command longnames and keybindings for all sheet types') 73 globalCommand(None, 'help-search', 'help_search(sheet, input("help: "))', 'search through command longnames with search terms') 74 75 BaseSheet.bindkey('KEY_F(1)', 'sysopen-help') 76 BaseSheet.bindkey('KEY_BACKSPACE', 'sysopen-help') 77 BaseSheet.bindkey('zKEY_F(1)', 'help-commands') 78 BaseSheet.bindkey('zKEY_BACKSPACE', 'help-commands') 79 [end of visidata/help.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/visidata/help.py b/visidata/help.py --- a/visidata/help.py +++ b/visidata/help.py @@ -36,7 +36,7 @@ self.revbinds = {} # [longname] -> keystrokes itbindings = vd.bindkeys.iterall() for (keystrokes, _), longname in itbindings: - if keystrokes not in self.revbinds: + if (keystrokes not in self.revbinds) and ('-' not in keystrokes or keystrokes[-1] == '-'): self.revbinds[longname] = keystrokes
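The fix narrows the reverse-binding map so that longname aliases (which contain a `-`) can no longer masquerade as keystrokes on the help sheet. A self-contained sketch of that filter, using made-up bindings in place of VisiData's real `vd.bindkeys` table:

```python
# Miniature version of the patched loop; the bindings below are illustrative.
bindings = [
    ("}", "go-next-selected"),              # a real keystroke
    ("next-selected", "go-next-selected"),  # an old-longname alias, not a key
    ("z-", "resize-col-half"),              # a real keystroke that ends in '-'
]

revbinds = {}
for keystrokes, longname in bindings:
    # Treat anything containing '-' as an alias, unless the '-' is the last
    # character (so keystrokes such as '-' or 'z-' are still accepted).
    if (keystrokes not in revbinds) and ('-' not in keystrokes or keystrokes[-1] == '-'):
        revbinds[longname] = keystrokes

print(revbinds)  # {'go-next-selected': '}', 'resize-col-half': 'z-'}
```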
{"golden_diff": "diff --git a/visidata/help.py b/visidata/help.py\n--- a/visidata/help.py\n+++ b/visidata/help.py\n@@ -36,7 +36,7 @@\n self.revbinds = {} # [longname] -> keystrokes\n itbindings = vd.bindkeys.iterall()\n for (keystrokes, _), longname in itbindings:\n- if keystrokes not in self.revbinds:\n+ if (keystrokes not in self.revbinds) and ('-' not in keystrokes or keystrokes[-1] == '-'):\n self.revbinds[longname] = keystrokes\n", "issue": "[help keystrokes] Help keystrokes has longnames for about 20 commands\n**Small description**\r\nKeystrokes in HelpSheet display commands not keystrokes for about 20 commands.\r\n\r\n**Expected result**\r\nKeystroke values\r\n\r\n**Actual result with screenshot**\r\n\r\nsheet\tlongname\tkeystrokes\r\nTableSheet\tgo-next-selected\tnext-selected\r\nTableSheet\tgo-prev-selected\tprev-selected\r\nTableSheet\tsetcol-clipboard\tpaste-cells\r\nTableSheet\tsearch-prev\tprev-search\r\nTableSheet\tsetcol-input\tedit-cells\r\nTableSheet\tsetcol-fill\tfill-nulls\r\nTableSheet\tgo-next-value\tnext-value\r\nTableSheet\tgo-right-page\tpage-right\r\nBaseSheet\tjump-prev\tprev-sheet\r\nTableSheet\tgo-prev-value\tprev-value\r\nBaseSheet\topen-new\tadd-sheet\r\nTableSheet\tgo-next-null\tnext-null\r\nTableSheet\tgo-left-page\tpage-left\r\nTableSheet\tgo-prev-null\tprev-null\r\nTableSheet\topen-row\tdive-row\r\nDirSheet\topen-row\tdive-row\r\nBaseSheet\topen-statuses\tstatuses\r\nCanvas\tzoomout-mouse\t2097152\r\nBaseSheet\tshow-status\tstatus\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\nVisit the help sheet, and look for the long commands above.\r\n\r\n**Additional context**\r\nPlease include the version of VisiData. Latest from develop\r\n\n", "before_files": [{"content": "from visidata import *\n\n\nclass HelpSheet(MetaSheet):\n 'Show all commands available to the source sheet.'\n rowtype = 'commands'\n precious = False\n _ordering = [('sheet', False), ('longname', False)]\n\n columns = [\n ColumnAttr('sheet'),\n ColumnAttr('longname'),\n Column('keystrokes', getter=lambda col,row: col.sheet.revbinds.get(row.longname)),\n Column('description', getter=lambda col,row: col.sheet.cmddict[(row.sheet, row.longname)].helpstr),\n ColumnAttr('execstr', width=0),\n Column('logged', width=0, getter=lambda col,row: isLoggableCommand(row.longname)),\n ]\n nKeys = 2\n\n def iterload(self):\n from pkg_resources import resource_filename\n cmdlist = VisiDataMetaSheet('cmdlist', source=None)\n\n self.cmddict = {}\n itcmds = vd.commands.iterall()\n for (k, o), v in itcmds:\n yield v\n v.sheet = o\n self.cmddict[(v.sheet, v.longname)] = v\n\n for cmdrow in cmdlist.rows:\n k = (cmdrow.sheet, cmdrow.longname)\n if k in self.cmddict:\n self.cmddict[k].helpstr = cmdrow.helpstr\n\n self.revbinds = {} # [longname] -> keystrokes\n itbindings = vd.bindkeys.iterall()\n for (keystrokes, _), longname in itbindings:\n if keystrokes not in self.revbinds:\n self.revbinds[longname] = keystrokes\n\n\[email protected]\n@asyncthread\ndef help_search(vd, sheet, regex):\n vs = HelpSheet(source=None)\n vs.rows = [] # do not trigger push reload\n vd.push(vs) # push first, then reload\n vd.sync(vs.reload())\n\n # find rows matching regex on original HelpSheet\n rowidxs = list(vd.searchRegex(vs, regex=regex, columns=\"visibleCols\"))\n\n # add only matching rows\n allrows = vs.rows\n vs.rows = []\n for rowidx in rowidxs:\n vs.addRow(allrows[rowidx])\n\n\[email protected]_api\ndef openManPage(vd):\n from pkg_resources import resource_filename\n import os\n with SuspendCurses():\n os.system(' 
'.join(['man', resource_filename(__name__, 'man/vd.1')]))\n\n\n# in VisiData, ^H refers to the man page\nglobalCommand('^H', 'sysopen-help', 'openManPage()', 'view vd man page')\nBaseSheet.addCommand('z^H', 'help-commands', 'vd.push(HelpSheet(name + \"_commands\", source=sheet, revbinds={}))', 'view sheet of command longnames and keybindings for current sheet')\nBaseSheet.addCommand('gz^H', 'help-commands-all', 'vd.push(HelpSheet(\"all_commands\", source=None, revbinds={}))', 'view sheet of command longnames and keybindings for all sheet types')\nglobalCommand(None, 'help-search', 'help_search(sheet, input(\"help: \"))', 'search through command longnames with search terms')\n\nBaseSheet.bindkey('KEY_F(1)', 'sysopen-help')\nBaseSheet.bindkey('KEY_BACKSPACE', 'sysopen-help')\nBaseSheet.bindkey('zKEY_F(1)', 'help-commands')\nBaseSheet.bindkey('zKEY_BACKSPACE', 'help-commands')\n", "path": "visidata/help.py"}]}
1,726
142
gh_patches_debug_4466
rasdani/github-patches
git_diff
googleapis__google-cloud-python-5899
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Default max_bytes in pubsub seems wrong https://github.com/GoogleCloudPlatform/google-cloud-python/blob/005ddd11770f8953643dac9b64a037524da2f49e/pubsub/google/cloud/pubsub_v1/types.py#L43 In my code I'm seeing this: ``` google.api_core.exceptions.InvalidArgument: 400 The value for 10159812 is too large. You passed request_size in the request, but the maximum value is 10000000. ``` Based on the error coming out of the pubsub, I think this default max size is wrong. </issue> <code> [start of pubsub/google/cloud/pubsub_v1/types.py] 1 # Copyright 2017, Google LLC All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 import collections 17 import sys 18 19 from google.api import http_pb2 20 from google.iam.v1 import iam_policy_pb2 21 from google.iam.v1 import policy_pb2 22 from google.iam.v1.logging import audit_data_pb2 23 from google.protobuf import descriptor_pb2 24 from google.protobuf import duration_pb2 25 from google.protobuf import empty_pb2 26 from google.protobuf import field_mask_pb2 27 from google.protobuf import timestamp_pb2 28 29 from google.api_core.protobuf_helpers import get_messages 30 from google.cloud.pubsub_v1.proto import pubsub_pb2 31 32 33 # Define the default values for batching. 34 # 35 # This class is used when creating a publisher or subscriber client, and 36 # these settings can be altered to tweak Pub/Sub behavior. 37 # The defaults should be fine for most use cases. 38 BatchSettings = collections.namedtuple( 39 'BatchSettings', 40 ['max_bytes', 'max_latency', 'max_messages'], 41 ) 42 BatchSettings.__new__.__defaults__ = ( 43 1024 * 1024 * 10, # max_bytes: 10 MB 44 0.05, # max_latency: 0.05 seconds 45 1000, # max_messages: 1,000 46 ) 47 48 # Define the type class and default values for flow control settings. 49 # 50 # This class is used when creating a publisher or subscriber client, and 51 # these settings can be altered to tweak Pub/Sub behavior. 52 # The defaults should be fine for most use cases. 53 FlowControl = collections.namedtuple( 54 'FlowControl', 55 ['max_bytes', 'max_messages', 'resume_threshold', 'max_requests', 56 'max_request_batch_size', 'max_request_batch_latency', 57 'max_lease_duration'], 58 ) 59 FlowControl.__new__.__defaults__ = ( 60 100 * 1024 * 1024, # max_bytes: 100mb 61 100, # max_messages: 100 62 0.8, # resume_threshold: 80% 63 100, # max_requests: 100 64 100, # max_request_batch_size: 100 65 0.01, # max_request_batch_latency: 0.01s 66 2 * 60 * 60, # max_lease_duration: 2 hours. 
67 ) 68 69 70 _shared_modules = [ 71 http_pb2, 72 iam_policy_pb2, 73 policy_pb2, 74 audit_data_pb2, 75 descriptor_pb2, 76 duration_pb2, 77 empty_pb2, 78 field_mask_pb2, 79 timestamp_pb2, 80 ] 81 82 _local_modules = [ 83 pubsub_pb2, 84 ] 85 86 87 names = ['BatchSettings', 'FlowControl'] 88 89 90 for module in _shared_modules: 91 for name, message in get_messages(module).items(): 92 setattr(sys.modules[__name__], name, message) 93 names.append(name) 94 95 for module in _local_modules: 96 for name, message in get_messages(module).items(): 97 message.__module__ = 'google.cloud.pubsub_v1.types' 98 setattr(sys.modules[__name__], name, message) 99 names.append(name) 100 101 102 __all__ = tuple(sorted(names)) 103 [end of pubsub/google/cloud/pubsub_v1/types.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pubsub/google/cloud/pubsub_v1/types.py b/pubsub/google/cloud/pubsub_v1/types.py --- a/pubsub/google/cloud/pubsub_v1/types.py +++ b/pubsub/google/cloud/pubsub_v1/types.py @@ -40,7 +40,7 @@ ['max_bytes', 'max_latency', 'max_messages'], ) BatchSettings.__new__.__defaults__ = ( - 1024 * 1024 * 10, # max_bytes: 10 MB + 1000 * 1000 * 10, # max_bytes: documented "10 MB", enforced 10000000 0.05, # max_latency: 0.05 seconds 1000, # max_messages: 1,000 )
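The root cause is arithmetic: the old default of 10 MiB (1024 × 1024 × 10 = 10,485,760 bytes) exceeds the server-side cap of 10,000,000 bytes quoted in the 400 error, while the new base-10 default sits exactly at it. A quick check, with a possible caller-side workaround left as a commented hint (it assumes `google-cloud-pubsub` is installed; the exact value chosen is arbitrary):

```python
# The numbers behind the diff above.
old_default = 1024 * 1024 * 10   # 10 MiB
new_default = 1000 * 1000 * 10   # the documented "10 MB"
server_max = 10000000            # limit quoted in the InvalidArgument error

print(old_default, old_default > server_max)    # 10485760 True
print(new_default, new_default <= server_max)   # 10000000 True

# Callers on an unpatched version can pass explicit batch settings instead
# (sketch only):
# from google.cloud import pubsub_v1
# publisher = pubsub_v1.PublisherClient(
#     batch_settings=pubsub_v1.types.BatchSettings(max_bytes=9500000),
# )
```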
{"golden_diff": "diff --git a/pubsub/google/cloud/pubsub_v1/types.py b/pubsub/google/cloud/pubsub_v1/types.py\n--- a/pubsub/google/cloud/pubsub_v1/types.py\n+++ b/pubsub/google/cloud/pubsub_v1/types.py\n@@ -40,7 +40,7 @@\n ['max_bytes', 'max_latency', 'max_messages'],\n )\n BatchSettings.__new__.__defaults__ = (\n- 1024 * 1024 * 10, # max_bytes: 10 MB\n+ 1000 * 1000 * 10, # max_bytes: documented \"10 MB\", enforced 10000000\n 0.05, # max_latency: 0.05 seconds\n 1000, # max_messages: 1,000\n )\n", "issue": "Default max_bytes in pubsub seems wrong\nhttps://github.com/GoogleCloudPlatform/google-cloud-python/blob/005ddd11770f8953643dac9b64a037524da2f49e/pubsub/google/cloud/pubsub_v1/types.py#L43\r\n\r\nIn my code I'm seeing this:\r\n```\r\ngoogle.api_core.exceptions.InvalidArgument: 400 The value for 10159812 is too large. You passed request_size in the request, but the maximum value is 10000000.\r\n```\r\n\r\nBased on the error coming out of the pubsub, I think this default max size is wrong.\n", "before_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport collections\nimport sys\n\nfrom google.api import http_pb2\nfrom google.iam.v1 import iam_policy_pb2\nfrom google.iam.v1 import policy_pb2\nfrom google.iam.v1.logging import audit_data_pb2\nfrom google.protobuf import descriptor_pb2\nfrom google.protobuf import duration_pb2\nfrom google.protobuf import empty_pb2\nfrom google.protobuf import field_mask_pb2\nfrom google.protobuf import timestamp_pb2\n\nfrom google.api_core.protobuf_helpers import get_messages\nfrom google.cloud.pubsub_v1.proto import pubsub_pb2\n\n\n# Define the default values for batching.\n#\n# This class is used when creating a publisher or subscriber client, and\n# these settings can be altered to tweak Pub/Sub behavior.\n# The defaults should be fine for most use cases.\nBatchSettings = collections.namedtuple(\n 'BatchSettings',\n ['max_bytes', 'max_latency', 'max_messages'],\n)\nBatchSettings.__new__.__defaults__ = (\n 1024 * 1024 * 10, # max_bytes: 10 MB\n 0.05, # max_latency: 0.05 seconds\n 1000, # max_messages: 1,000\n)\n\n# Define the type class and default values for flow control settings.\n#\n# This class is used when creating a publisher or subscriber client, and\n# these settings can be altered to tweak Pub/Sub behavior.\n# The defaults should be fine for most use cases.\nFlowControl = collections.namedtuple(\n 'FlowControl',\n ['max_bytes', 'max_messages', 'resume_threshold', 'max_requests',\n 'max_request_batch_size', 'max_request_batch_latency',\n 'max_lease_duration'],\n)\nFlowControl.__new__.__defaults__ = (\n 100 * 1024 * 1024, # max_bytes: 100mb\n 100, # max_messages: 100\n 0.8, # resume_threshold: 80%\n 100, # max_requests: 100\n 100, # max_request_batch_size: 100\n 0.01, # max_request_batch_latency: 0.01s\n 2 * 60 * 60, # max_lease_duration: 2 hours.\n)\n\n\n_shared_modules = [\n http_pb2,\n iam_policy_pb2,\n policy_pb2,\n 
audit_data_pb2,\n descriptor_pb2,\n duration_pb2,\n empty_pb2,\n field_mask_pb2,\n timestamp_pb2,\n]\n\n_local_modules = [\n pubsub_pb2,\n]\n\n\nnames = ['BatchSettings', 'FlowControl']\n\n\nfor module in _shared_modules:\n for name, message in get_messages(module).items():\n setattr(sys.modules[__name__], name, message)\n names.append(name)\n\nfor module in _local_modules:\n for name, message in get_messages(module).items():\n message.__module__ = 'google.cloud.pubsub_v1.types'\n setattr(sys.modules[__name__], name, message)\n names.append(name)\n\n\n__all__ = tuple(sorted(names))\n", "path": "pubsub/google/cloud/pubsub_v1/types.py"}]}
1,743
195
gh_patches_debug_11467
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-523
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error with the scout add-on in Heroku With version 2.14.1 of this package, I encounter the following error: ` File "/code/nluproxy/__init__.py", line 3, in <module> from .celery import app as celery_app File "/code/nluproxy/celery.py", line 12, in <module> scout_apm.celery.install(app) File "/usr/local/lib/python3.7/site-packages/scout_apm/celery.py", line 58, in install installed = scout_apm.core.install() File "/usr/local/lib/python3.7/site-packages/scout_apm/compat.py", line 132, in wrapper return func(*args, **kwargs) File "/usr/local/lib/python3.7/site-packages/scout_apm/core/__init__.py", line 46, in install report_app_metadata() File "/usr/local/lib/python3.7/site-packages/scout_apm/core/metadata.py", line 17, in report_app_metadata event_value=get_metadata(), File "/usr/local/lib/python3.7/site-packages/scout_apm/core/metadata.py", line 37, in get_metadata "libraries": get_python_packages_versions(), File "/usr/local/lib/python3.7/site-packages/scout_apm/core/metadata.py", line 60, in get_python_packages_versions for distribution in distributions() TypeError: '<' not supported between instances of 'NoneType' and 'str' ` </issue> <code> [start of src/scout_apm/core/metadata.py] 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import datetime as dt 5 import sys 6 from os import getpid 7 8 from scout_apm.core.commands import ApplicationEvent 9 from scout_apm.core.config import scout_config 10 from scout_apm.core.socket import CoreAgentSocketThread 11 12 13 def report_app_metadata(): 14 CoreAgentSocketThread.send( 15 ApplicationEvent( 16 event_type="scout.metadata", 17 event_value=get_metadata(), 18 source="Pid: " + str(getpid()), 19 timestamp=dt.datetime.utcnow(), 20 ) 21 ) 22 23 24 def get_metadata(): 25 data = { 26 "language": "python", 27 "language_version": "{}.{}.{}".format(*sys.version_info[:3]), 28 "server_time": dt.datetime.utcnow().isoformat() + "Z", 29 "framework": scout_config.value("framework"), 30 "framework_version": scout_config.value("framework_version"), 31 "environment": "", 32 "app_server": scout_config.value("app_server"), 33 "hostname": scout_config.value("hostname"), 34 "database_engine": "", 35 "database_adapter": "", 36 "application_name": "", 37 "libraries": get_python_packages_versions(), 38 "paas": "", 39 "application_root": scout_config.value("application_root"), 40 "scm_subdirectory": scout_config.value("scm_subdirectory"), 41 "git_sha": scout_config.value("revision_sha"), 42 } 43 # Deprecated - see #327: 44 data["version"] = data["language_version"] 45 return data 46 47 48 def get_python_packages_versions(): 49 try: 50 if sys.version_info >= (3, 8): 51 from importlib.metadata import distributions 52 else: 53 from importlib_metadata import distributions 54 except ImportError: 55 # For some reason it is unavailable 56 return [] 57 58 return sorted( 59 (distribution.metadata["Name"], distribution.metadata["Version"]) 60 for distribution in distributions() 61 ) 62 [end of src/scout_apm/core/metadata.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/scout_apm/core/metadata.py b/src/scout_apm/core/metadata.py --- a/src/scout_apm/core/metadata.py +++ b/src/scout_apm/core/metadata.py @@ -56,6 +56,15 @@ return [] return sorted( - (distribution.metadata["Name"], distribution.metadata["Version"]) + ( + distribution.metadata["Name"], + (distribution.metadata["Version"] or "Unknown"), + ) for distribution in distributions() + # Filter out distributions wtih None for name or value. This can be the + # case for packages without a METADATA or PKG-INFO file in their relevant + # distribution directory. According to comments in importlib.metadata + # internals this is possible for certain old packages, but I could only + # recreate it by deliberately deleting said files. + if distribution.metadata["Name"] )
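The traceback bottoms out in `sorted()`: some distributions report a `None` name or version, and a tuple whose first element is `None` cannot be ordered against one that starts with a string. A tiny reproduction plus the patched behaviour, with hard-coded tuples standing in for real `importlib.metadata` distributions:

```python
# Stand-in for the (Name, Version) pairs built in get_python_packages_versions().
pairs = [("requests", "2.24.0"), (None, None), ("celery", "4.4.7")]

try:
    sorted(pairs)          # old behaviour: None gets compared against a str
except TypeError as err:
    print(err)             # '<' not supported between instances of 'NoneType' and 'str'

# Patched behaviour: skip nameless distributions, default missing versions.
cleaned = sorted(
    (name, version or "Unknown")
    for name, version in pairs
    if name
)
print(cleaned)  # [('celery', '4.4.7'), ('requests', '2.24.0')]
```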
{"golden_diff": "diff --git a/src/scout_apm/core/metadata.py b/src/scout_apm/core/metadata.py\n--- a/src/scout_apm/core/metadata.py\n+++ b/src/scout_apm/core/metadata.py\n@@ -56,6 +56,15 @@\n return []\n \n return sorted(\n- (distribution.metadata[\"Name\"], distribution.metadata[\"Version\"])\n+ (\n+ distribution.metadata[\"Name\"],\n+ (distribution.metadata[\"Version\"] or \"Unknown\"),\n+ )\n for distribution in distributions()\n+ # Filter out distributions wtih None for name or value. This can be the\n+ # case for packages without a METADATA or PKG-INFO file in their relevant\n+ # distribution directory. According to comments in importlib.metadata\n+ # internals this is possible for certain old packages, but I could only\n+ # recreate it by deliberately deleting said files.\n+ if distribution.metadata[\"Name\"]\n )\n", "issue": "Error with the scout add-on in Heroku\nWith version 2.14.1 of this package, I encounter the following error:\r\n` File \"/code/nluproxy/__init__.py\", line 3, in <module>\r\n\r\n from .celery import app as celery_app\r\n\r\n File \"/code/nluproxy/celery.py\", line 12, in <module>\r\n\r\n scout_apm.celery.install(app)\r\n\r\n File \"/usr/local/lib/python3.7/site-packages/scout_apm/celery.py\", line 58, in install\r\n\r\n installed = scout_apm.core.install()\r\n\r\n File \"/usr/local/lib/python3.7/site-packages/scout_apm/compat.py\", line 132, in wrapper\r\n\r\n return func(*args, **kwargs)\r\n\r\n File \"/usr/local/lib/python3.7/site-packages/scout_apm/core/__init__.py\", line 46, in install\r\n\r\n report_app_metadata()\r\n\r\n File \"/usr/local/lib/python3.7/site-packages/scout_apm/core/metadata.py\", line 17, in report_app_metadata\r\n\r\n event_value=get_metadata(),\r\n\r\n File \"/usr/local/lib/python3.7/site-packages/scout_apm/core/metadata.py\", line 37, in get_metadata\r\n\r\n \"libraries\": get_python_packages_versions(),\r\n\r\n File \"/usr/local/lib/python3.7/site-packages/scout_apm/core/metadata.py\", line 60, in get_python_packages_versions\r\n\r\n for distribution in distributions()\r\n\r\nTypeError: '<' not supported between instances of 'NoneType' and 'str'\r\n\r\n`\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport sys\nfrom os import getpid\n\nfrom scout_apm.core.commands import ApplicationEvent\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.socket import CoreAgentSocketThread\n\n\ndef report_app_metadata():\n CoreAgentSocketThread.send(\n ApplicationEvent(\n event_type=\"scout.metadata\",\n event_value=get_metadata(),\n source=\"Pid: \" + str(getpid()),\n timestamp=dt.datetime.utcnow(),\n )\n )\n\n\ndef get_metadata():\n data = {\n \"language\": \"python\",\n \"language_version\": \"{}.{}.{}\".format(*sys.version_info[:3]),\n \"server_time\": dt.datetime.utcnow().isoformat() + \"Z\",\n \"framework\": scout_config.value(\"framework\"),\n \"framework_version\": scout_config.value(\"framework_version\"),\n \"environment\": \"\",\n \"app_server\": scout_config.value(\"app_server\"),\n \"hostname\": scout_config.value(\"hostname\"),\n \"database_engine\": \"\",\n \"database_adapter\": \"\",\n \"application_name\": \"\",\n \"libraries\": get_python_packages_versions(),\n \"paas\": \"\",\n \"application_root\": scout_config.value(\"application_root\"),\n \"scm_subdirectory\": scout_config.value(\"scm_subdirectory\"),\n \"git_sha\": scout_config.value(\"revision_sha\"),\n }\n # Deprecated - see #327:\n 
data[\"version\"] = data[\"language_version\"]\n return data\n\n\ndef get_python_packages_versions():\n try:\n if sys.version_info >= (3, 8):\n from importlib.metadata import distributions\n else:\n from importlib_metadata import distributions\n except ImportError:\n # For some reason it is unavailable\n return []\n\n return sorted(\n (distribution.metadata[\"Name\"], distribution.metadata[\"Version\"])\n for distribution in distributions()\n )\n", "path": "src/scout_apm/core/metadata.py"}]}
1,403
205
gh_patches_debug_7171
rasdani/github-patches
git_diff
matrix-org__synapse-14727
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> synapse returns undocumented `device` property from `GET /_matrix/client/v3/pushrules/` Synapse's response to a `/pushrules` query looks something like: ```json5 [ "global": { "content": { /*... */ }, "override": { /*... */ }, "room": { /*... */ }, "sender": { /*... */ }, "underride": { /*... */ } }, "device": {} ] ``` This `device` property is not [specced](https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3pushrules) and should be removed. </issue> <code> [start of synapse/push/clientformat.py] 1 # Copyright 2016 OpenMarket Ltd 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import copy 16 from typing import Any, Dict, List, Optional 17 18 from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP 19 from synapse.synapse_rust.push import FilteredPushRules, PushRule 20 from synapse.types import UserID 21 22 23 def format_push_rules_for_user( 24 user: UserID, ruleslist: FilteredPushRules 25 ) -> Dict[str, Dict[str, list]]: 26 """Converts a list of rawrules and a enabled map into nested dictionaries 27 to match the Matrix client-server format for push rules""" 28 29 rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = { 30 "global": {}, 31 "device": {}, 32 } 33 34 rules["global"] = _add_empty_priority_class_arrays(rules["global"]) 35 36 for r, enabled in ruleslist.rules(): 37 template_name = _priority_class_to_template_name(r.priority_class) 38 39 rulearray = rules["global"][template_name] 40 41 template_rule = _rule_to_template(r) 42 if not template_rule: 43 continue 44 45 rulearray.append(template_rule) 46 47 pattern_type = template_rule.pop("pattern_type", None) 48 if pattern_type == "user_id": 49 template_rule["pattern"] = user.to_string() 50 elif pattern_type == "user_localpart": 51 template_rule["pattern"] = user.localpart 52 53 template_rule["enabled"] = enabled 54 55 if "conditions" not in template_rule: 56 # Not all formatted rules have explicit conditions, e.g. "room" 57 # rules omit them as they can be derived from the kind and rule ID. 58 # 59 # If the formatted rule has no conditions then we can skip the 60 # formatting of conditions. 61 continue 62 63 # Remove internal stuff. 
64 template_rule["conditions"] = copy.deepcopy(template_rule["conditions"]) 65 for c in template_rule["conditions"]: 66 c.pop("_cache_key", None) 67 68 pattern_type = c.pop("pattern_type", None) 69 if pattern_type == "user_id": 70 c["pattern"] = user.to_string() 71 elif pattern_type == "user_localpart": 72 c["pattern"] = user.localpart 73 74 sender_type = c.pop("sender_type", None) 75 if sender_type == "user_id": 76 c["sender"] = user.to_string() 77 78 return rules 79 80 81 def _add_empty_priority_class_arrays(d: Dict[str, list]) -> Dict[str, list]: 82 for pc in PRIORITY_CLASS_MAP.keys(): 83 d[pc] = [] 84 return d 85 86 87 def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]: 88 templaterule: Dict[str, Any] 89 90 unscoped_rule_id = _rule_id_from_namespaced(rule.rule_id) 91 92 template_name = _priority_class_to_template_name(rule.priority_class) 93 if template_name in ["override", "underride"]: 94 templaterule = {"conditions": rule.conditions, "actions": rule.actions} 95 elif template_name in ["sender", "room"]: 96 templaterule = {"actions": rule.actions} 97 unscoped_rule_id = rule.conditions[0]["pattern"] 98 elif template_name == "content": 99 if len(rule.conditions) != 1: 100 return None 101 thecond = rule.conditions[0] 102 103 templaterule = {"actions": rule.actions} 104 if "pattern" in thecond: 105 templaterule["pattern"] = thecond["pattern"] 106 elif "pattern_type" in thecond: 107 templaterule["pattern_type"] = thecond["pattern_type"] 108 else: 109 return None 110 else: 111 # This should not be reached unless this function is not kept in sync 112 # with PRIORITY_CLASS_INVERSE_MAP. 113 raise ValueError("Unexpected template_name: %s" % (template_name,)) 114 115 templaterule["rule_id"] = unscoped_rule_id 116 templaterule["default"] = rule.default 117 return templaterule 118 119 120 def _rule_id_from_namespaced(in_rule_id: str) -> str: 121 return in_rule_id.split("/")[-1] 122 123 124 def _priority_class_to_template_name(pc: int) -> str: 125 return PRIORITY_CLASS_INVERSE_MAP[pc] 126 [end of synapse/push/clientformat.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -26,10 +26,7 @@ """Converts a list of rawrules and a enabled map into nested dictionaries to match the Matrix client-server format for push rules""" - rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = { - "global": {}, - "device": {}, - } + rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = {"global": {}} rules["global"] = _add_empty_priority_class_arrays(rules["global"])
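For the client-visible effect, here is the top level of the `/pushrules` response before and after the change, sketched with hard-coded dictionaries (rule contents elided — only the keys matter):

```python
before = {
    "global": {"content": [], "override": [], "room": [], "sender": [], "underride": []},
    "device": {},  # unspecced and always empty
}
after = {
    "global": {"content": [], "override": [], "room": [], "sender": [], "underride": []},
}

assert set(after) == {"global"}   # matches the spec
assert "device" not in after      # the stray key is gone
```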
{"golden_diff": "diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py\n--- a/synapse/push/clientformat.py\n+++ b/synapse/push/clientformat.py\n@@ -26,10 +26,7 @@\n \"\"\"Converts a list of rawrules and a enabled map into nested dictionaries\n to match the Matrix client-server format for push rules\"\"\"\n \n- rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = {\n- \"global\": {},\n- \"device\": {},\n- }\n+ rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = {\"global\": {}}\n \n rules[\"global\"] = _add_empty_priority_class_arrays(rules[\"global\"])\n", "issue": "synapse returns undocumented `device` property from `GET /_matrix/client/v3/pushrules/`\nSynapse's response to a `/pushrules` query looks something like:\r\n\r\n```json5\r\n[\r\n \"global\": {\r\n \"content\": { /*... */ },\r\n \"override\": { /*... */ },\r\n \"room\": { /*... */ },\r\n \"sender\": { /*... */ },\r\n \"underride\": { /*... */ }\r\n },\r\n \"device\": {}\r\n]\r\n```\r\n\r\nThis `device` property is not [specced](https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3pushrules) and should be removed.\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nfrom typing import Any, Dict, List, Optional\n\nfrom synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP\nfrom synapse.synapse_rust.push import FilteredPushRules, PushRule\nfrom synapse.types import UserID\n\n\ndef format_push_rules_for_user(\n user: UserID, ruleslist: FilteredPushRules\n) -> Dict[str, Dict[str, list]]:\n \"\"\"Converts a list of rawrules and a enabled map into nested dictionaries\n to match the Matrix client-server format for push rules\"\"\"\n\n rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = {\n \"global\": {},\n \"device\": {},\n }\n\n rules[\"global\"] = _add_empty_priority_class_arrays(rules[\"global\"])\n\n for r, enabled in ruleslist.rules():\n template_name = _priority_class_to_template_name(r.priority_class)\n\n rulearray = rules[\"global\"][template_name]\n\n template_rule = _rule_to_template(r)\n if not template_rule:\n continue\n\n rulearray.append(template_rule)\n\n pattern_type = template_rule.pop(\"pattern_type\", None)\n if pattern_type == \"user_id\":\n template_rule[\"pattern\"] = user.to_string()\n elif pattern_type == \"user_localpart\":\n template_rule[\"pattern\"] = user.localpart\n\n template_rule[\"enabled\"] = enabled\n\n if \"conditions\" not in template_rule:\n # Not all formatted rules have explicit conditions, e.g. 
\"room\"\n # rules omit them as they can be derived from the kind and rule ID.\n #\n # If the formatted rule has no conditions then we can skip the\n # formatting of conditions.\n continue\n\n # Remove internal stuff.\n template_rule[\"conditions\"] = copy.deepcopy(template_rule[\"conditions\"])\n for c in template_rule[\"conditions\"]:\n c.pop(\"_cache_key\", None)\n\n pattern_type = c.pop(\"pattern_type\", None)\n if pattern_type == \"user_id\":\n c[\"pattern\"] = user.to_string()\n elif pattern_type == \"user_localpart\":\n c[\"pattern\"] = user.localpart\n\n sender_type = c.pop(\"sender_type\", None)\n if sender_type == \"user_id\":\n c[\"sender\"] = user.to_string()\n\n return rules\n\n\ndef _add_empty_priority_class_arrays(d: Dict[str, list]) -> Dict[str, list]:\n for pc in PRIORITY_CLASS_MAP.keys():\n d[pc] = []\n return d\n\n\ndef _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]:\n templaterule: Dict[str, Any]\n\n unscoped_rule_id = _rule_id_from_namespaced(rule.rule_id)\n\n template_name = _priority_class_to_template_name(rule.priority_class)\n if template_name in [\"override\", \"underride\"]:\n templaterule = {\"conditions\": rule.conditions, \"actions\": rule.actions}\n elif template_name in [\"sender\", \"room\"]:\n templaterule = {\"actions\": rule.actions}\n unscoped_rule_id = rule.conditions[0][\"pattern\"]\n elif template_name == \"content\":\n if len(rule.conditions) != 1:\n return None\n thecond = rule.conditions[0]\n\n templaterule = {\"actions\": rule.actions}\n if \"pattern\" in thecond:\n templaterule[\"pattern\"] = thecond[\"pattern\"]\n elif \"pattern_type\" in thecond:\n templaterule[\"pattern_type\"] = thecond[\"pattern_type\"]\n else:\n return None\n else:\n # This should not be reached unless this function is not kept in sync\n # with PRIORITY_CLASS_INVERSE_MAP.\n raise ValueError(\"Unexpected template_name: %s\" % (template_name,))\n\n templaterule[\"rule_id\"] = unscoped_rule_id\n templaterule[\"default\"] = rule.default\n return templaterule\n\n\ndef _rule_id_from_namespaced(in_rule_id: str) -> str:\n return in_rule_id.split(\"/\")[-1]\n\n\ndef _priority_class_to_template_name(pc: int) -> str:\n return PRIORITY_CLASS_INVERSE_MAP[pc]\n", "path": "synapse/push/clientformat.py"}]}
1,983
164
gh_patches_debug_20549
rasdani/github-patches
git_diff
liberapay__liberapay.com-1378
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cron fails to run The weekly cron for `create_payday_issue` is no longer running successfully at the scheduled time. </issue> <code> [start of liberapay/cron.py] 1 from collections import namedtuple 2 from datetime import datetime, timedelta 3 import logging 4 import threading 5 from time import sleep 6 7 8 logger = logging.getLogger('liberapay.cron') 9 10 11 Daily = namedtuple('Daily', 'hour') 12 Weekly = namedtuple('Weekly', 'weekday hour') 13 14 15 class Cron(object): 16 17 def __init__(self, website): 18 self.website = website 19 self.conn = None 20 self.has_lock = False 21 self.exclusive_jobs = [] 22 23 def __call__(self, period, func, exclusive=False): 24 if not self.website.env.run_cron_jobs: 25 return 26 if exclusive and not self.has_lock: 27 self.exclusive_jobs.append((period, func)) 28 self._wait_for_lock() 29 return 30 def f(): 31 if isinstance(period, Weekly): 32 while True: 33 now = datetime.utcnow() 34 then = now.replace(hour=period.hour, minute=10, second=0) 35 days = (now.isoweekday() - period.weekday) % 7 36 if days: 37 then += timedelta(days=days) 38 seconds = (then - now).total_seconds() 39 if seconds > 0: 40 sleep(seconds) 41 elif seconds < -60: 42 sleep(86400 * 6) 43 continue 44 try: 45 func() 46 except Exception as e: 47 self.website.tell_sentry(e, {}) 48 sleep(86400 * 6) 49 elif isinstance(period, Daily): 50 while True: 51 now = datetime.utcnow() 52 then = now.replace(hour=period.hour, minute=5, second=0) 53 seconds = (then - now).total_seconds() 54 if seconds > 0: 55 # later today 56 sleep(seconds) 57 elif seconds < -60: 58 # tomorrow 59 sleep(3600 * 24 + seconds) 60 try: 61 func() 62 except Exception as e: 63 self.website.tell_sentry(e, {}) 64 # retry in 5 minutes 65 sleep(300) 66 else: 67 # success, sleep until tomorrow 68 sleep(3600 * 23) 69 else: 70 assert period >= 1 71 while True: 72 try: 73 func() 74 except Exception as e: 75 self.website.tell_sentry(e, {}) 76 sleep(period) 77 t = threading.Thread(target=f) 78 t.daemon = True 79 t.start() 80 81 def _wait_for_lock(self): 82 if self.conn: 83 return # Already waiting 84 self.conn = self.website.db.get_connection().__enter__() 85 def f(): 86 cursor = self.conn.cursor() 87 while True: 88 if cursor.one("SELECT pg_try_advisory_lock(0)"): 89 self.has_lock = True 90 break 91 sleep(300) 92 for job in self.exclusive_jobs: 93 self(*job, exclusive=True) 94 t = threading.Thread(target=f) 95 t.daemon = True 96 t.start() 97 [end of liberapay/cron.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liberapay/cron.py b/liberapay/cron.py --- a/liberapay/cron.py +++ b/liberapay/cron.py @@ -21,7 +21,7 @@ self.exclusive_jobs = [] def __call__(self, period, func, exclusive=False): - if not self.website.env.run_cron_jobs: + if not self.website.env.run_cron_jobs or not period: return if exclusive and not self.has_lock: self.exclusive_jobs.append((period, func)) @@ -32,7 +32,7 @@ while True: now = datetime.utcnow() then = now.replace(hour=period.hour, minute=10, second=0) - days = (now.isoweekday() - period.weekday) % 7 + days = (period.weekday - now.isoweekday()) % 7 if days: then += timedelta(days=days) seconds = (then - now).total_seconds()
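The single reordered subtraction matters because the modulo is direction-sensitive: the old expression measured how many days we are *past* the target weekday, not how many days remain *until* it, so a weekly job such as `create_payday_issue` got scheduled onto the wrong day. A dated example (2019-04-01 happens to be a Monday; the target weekday 3 is illustrative):

```python
from datetime import datetime, timedelta

now = datetime(2019, 4, 1)   # a Monday, so now.isoweekday() == 1
target_weekday = 3           # e.g. Weekly(weekday=3, hour=...) -> Wednesday

old_days = (now.isoweekday() - target_weekday) % 7   # (1 - 3) % 7 == 5
new_days = (target_weekday - now.isoweekday()) % 7   # (3 - 1) % 7 == 2

print((now + timedelta(days=old_days)).strftime("%A"))  # Saturday  -- wrong day
print((now + timedelta(days=new_days)).strftime("%A"))  # Wednesday -- as intended
```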
{"golden_diff": "diff --git a/liberapay/cron.py b/liberapay/cron.py\n--- a/liberapay/cron.py\n+++ b/liberapay/cron.py\n@@ -21,7 +21,7 @@\n self.exclusive_jobs = []\n \n def __call__(self, period, func, exclusive=False):\n- if not self.website.env.run_cron_jobs:\n+ if not self.website.env.run_cron_jobs or not period:\n return\n if exclusive and not self.has_lock:\n self.exclusive_jobs.append((period, func))\n@@ -32,7 +32,7 @@\n while True:\n now = datetime.utcnow()\n then = now.replace(hour=period.hour, minute=10, second=0)\n- days = (now.isoweekday() - period.weekday) % 7\n+ days = (period.weekday - now.isoweekday()) % 7\n if days:\n then += timedelta(days=days)\n seconds = (then - now).total_seconds()\n", "issue": "Cron fails to run\nThe weekly cron for `create_payday_issue` is no longer running successfully at the scheduled time.\n", "before_files": [{"content": "from collections import namedtuple\nfrom datetime import datetime, timedelta\nimport logging\nimport threading\nfrom time import sleep\n\n\nlogger = logging.getLogger('liberapay.cron')\n\n\nDaily = namedtuple('Daily', 'hour')\nWeekly = namedtuple('Weekly', 'weekday hour')\n\n\nclass Cron(object):\n\n def __init__(self, website):\n self.website = website\n self.conn = None\n self.has_lock = False\n self.exclusive_jobs = []\n\n def __call__(self, period, func, exclusive=False):\n if not self.website.env.run_cron_jobs:\n return\n if exclusive and not self.has_lock:\n self.exclusive_jobs.append((period, func))\n self._wait_for_lock()\n return\n def f():\n if isinstance(period, Weekly):\n while True:\n now = datetime.utcnow()\n then = now.replace(hour=period.hour, minute=10, second=0)\n days = (now.isoweekday() - period.weekday) % 7\n if days:\n then += timedelta(days=days)\n seconds = (then - now).total_seconds()\n if seconds > 0:\n sleep(seconds)\n elif seconds < -60:\n sleep(86400 * 6)\n continue\n try:\n func()\n except Exception as e:\n self.website.tell_sentry(e, {})\n sleep(86400 * 6)\n elif isinstance(period, Daily):\n while True:\n now = datetime.utcnow()\n then = now.replace(hour=period.hour, minute=5, second=0)\n seconds = (then - now).total_seconds()\n if seconds > 0:\n # later today\n sleep(seconds)\n elif seconds < -60:\n # tomorrow\n sleep(3600 * 24 + seconds)\n try:\n func()\n except Exception as e:\n self.website.tell_sentry(e, {})\n # retry in 5 minutes\n sleep(300)\n else:\n # success, sleep until tomorrow\n sleep(3600 * 23)\n else:\n assert period >= 1\n while True:\n try:\n func()\n except Exception as e:\n self.website.tell_sentry(e, {})\n sleep(period)\n t = threading.Thread(target=f)\n t.daemon = True\n t.start()\n\n def _wait_for_lock(self):\n if self.conn:\n return # Already waiting\n self.conn = self.website.db.get_connection().__enter__()\n def f():\n cursor = self.conn.cursor()\n while True:\n if cursor.one(\"SELECT pg_try_advisory_lock(0)\"):\n self.has_lock = True\n break\n sleep(300)\n for job in self.exclusive_jobs:\n self(*job, exclusive=True)\n t = threading.Thread(target=f)\n t.daemon = True\n t.start()\n", "path": "liberapay/cron.py"}]}
1,379
228
gh_patches_debug_17568
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-473
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> QLayoutWidget Add Label Error: No method AddItem The code in QLayoutWidget looks something like: ``` def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs): """ Create a QLabel with *text* and place it in the next available cell (or in the cell specified) All extra keyword arguments are passed to QLabel(). Returns the created widget. """ text = QtGui.QLabel(text, **kargs) self.addItem(text, row, col, rowspan, colspan) return text ``` I think it should have `self.addWidget` instead of `self.addItem` as addItem method is missing. Error: ``` AttributeError: 'LayoutWidget' object has no attribute 'addItem' ``` </issue> <code> [start of pyqtgraph/widgets/LayoutWidget.py] 1 from ..Qt import QtGui, QtCore 2 3 __all__ = ['LayoutWidget'] 4 class LayoutWidget(QtGui.QWidget): 5 """ 6 Convenience class used for laying out QWidgets in a grid. 7 (It's just a little less effort to use than QGridLayout) 8 """ 9 10 def __init__(self, parent=None): 11 QtGui.QWidget.__init__(self, parent) 12 self.layout = QtGui.QGridLayout() 13 self.setLayout(self.layout) 14 self.items = {} 15 self.rows = {} 16 self.currentRow = 0 17 self.currentCol = 0 18 19 def nextRow(self): 20 """Advance to next row for automatic widget placement""" 21 self.currentRow += 1 22 self.currentCol = 0 23 24 def nextColumn(self, colspan=1): 25 """Advance to next column, while returning the current column number 26 (generally only for internal use--called by addWidget)""" 27 self.currentCol += colspan 28 return self.currentCol-colspan 29 30 def nextCol(self, *args, **kargs): 31 """Alias of nextColumn""" 32 return self.nextColumn(*args, **kargs) 33 34 35 def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs): 36 """ 37 Create a QLabel with *text* and place it in the next available cell (or in the cell specified) 38 All extra keyword arguments are passed to QLabel(). 39 Returns the created widget. 40 """ 41 text = QtGui.QLabel(text, **kargs) 42 self.addItem(text, row, col, rowspan, colspan) 43 return text 44 45 def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs): 46 """ 47 Create an empty LayoutWidget and place it in the next available cell (or in the cell specified) 48 All extra keyword arguments are passed to :func:`LayoutWidget.__init__ <pyqtgraph.LayoutWidget.__init__>` 49 Returns the created widget. 50 """ 51 layout = LayoutWidget(**kargs) 52 self.addItem(layout, row, col, rowspan, colspan) 53 return layout 54 55 def addWidget(self, item, row=None, col=None, rowspan=1, colspan=1): 56 """ 57 Add a widget to the layout and place it in the next available cell (or in the cell specified). 
58 """ 59 if row == 'next': 60 self.nextRow() 61 row = self.currentRow 62 elif row is None: 63 row = self.currentRow 64 65 66 if col is None: 67 col = self.nextCol(colspan) 68 69 if row not in self.rows: 70 self.rows[row] = {} 71 self.rows[row][col] = item 72 self.items[item] = (row, col) 73 74 self.layout.addWidget(item, row, col, rowspan, colspan) 75 76 def getWidget(self, row, col): 77 """Return the widget in (*row*, *col*)""" 78 return self.row[row][col] 79 80 #def itemIndex(self, item): 81 #for i in range(self.layout.count()): 82 #if self.layout.itemAt(i).graphicsItem() is item: 83 #return i 84 #raise Exception("Could not determine index of item " + str(item)) 85 86 #def removeItem(self, item): 87 #"""Remove *item* from the layout.""" 88 #ind = self.itemIndex(item) 89 #self.layout.removeAt(ind) 90 #self.scene().removeItem(item) 91 #r,c = self.items[item] 92 #del self.items[item] 93 #del self.rows[r][c] 94 #self.update() 95 96 #def clear(self): 97 #items = [] 98 #for i in list(self.items.keys()): 99 #self.removeItem(i) 100 101 102 [end of pyqtgraph/widgets/LayoutWidget.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyqtgraph/widgets/LayoutWidget.py b/pyqtgraph/widgets/LayoutWidget.py --- a/pyqtgraph/widgets/LayoutWidget.py +++ b/pyqtgraph/widgets/LayoutWidget.py @@ -39,7 +39,7 @@ Returns the created widget. """ text = QtGui.QLabel(text, **kargs) - self.addItem(text, row, col, rowspan, colspan) + self.addWidget(text, row, col, rowspan, colspan) return text def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs): @@ -49,7 +49,7 @@ Returns the created widget. """ layout = LayoutWidget(**kargs) - self.addItem(layout, row, col, rowspan, colspan) + self.addWidget(layout, row, col, rowspan, colspan) return layout def addWidget(self, item, row=None, col=None, rowspan=1, colspan=1):
{"golden_diff": "diff --git a/pyqtgraph/widgets/LayoutWidget.py b/pyqtgraph/widgets/LayoutWidget.py\n--- a/pyqtgraph/widgets/LayoutWidget.py\n+++ b/pyqtgraph/widgets/LayoutWidget.py\n@@ -39,7 +39,7 @@\n Returns the created widget.\n \"\"\"\n text = QtGui.QLabel(text, **kargs)\n- self.addItem(text, row, col, rowspan, colspan)\n+ self.addWidget(text, row, col, rowspan, colspan)\n return text\n \n def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs):\n@@ -49,7 +49,7 @@\n Returns the created widget.\n \"\"\"\n layout = LayoutWidget(**kargs)\n- self.addItem(layout, row, col, rowspan, colspan)\n+ self.addWidget(layout, row, col, rowspan, colspan)\n return layout\n \n def addWidget(self, item, row=None, col=None, rowspan=1, colspan=1):\n", "issue": "QLayoutWidget Add Label Error: No method AddItem\nThe code in QLayoutWidget looks something like:\n\n```\n def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create a QLabel with *text* and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to QLabel().\n Returns the created widget.\n \"\"\"\n text = QtGui.QLabel(text, **kargs)\n self.addItem(text, row, col, rowspan, colspan)\n return text\n```\n\nI think it should have `self.addWidget` instead of `self.addItem` as addItem method is missing.\n\nError:\n\n```\nAttributeError: 'LayoutWidget' object has no attribute 'addItem'\n```\n\n", "before_files": [{"content": "from ..Qt import QtGui, QtCore\n\n__all__ = ['LayoutWidget']\nclass LayoutWidget(QtGui.QWidget):\n \"\"\"\n Convenience class used for laying out QWidgets in a grid.\n (It's just a little less effort to use than QGridLayout)\n \"\"\"\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.layout = QtGui.QGridLayout()\n self.setLayout(self.layout)\n self.items = {}\n self.rows = {}\n self.currentRow = 0\n self.currentCol = 0\n \n def nextRow(self):\n \"\"\"Advance to next row for automatic widget placement\"\"\"\n self.currentRow += 1\n self.currentCol = 0\n \n def nextColumn(self, colspan=1):\n \"\"\"Advance to next column, while returning the current column number \n (generally only for internal use--called by addWidget)\"\"\"\n self.currentCol += colspan\n return self.currentCol-colspan\n \n def nextCol(self, *args, **kargs):\n \"\"\"Alias of nextColumn\"\"\"\n return self.nextColumn(*args, **kargs)\n \n \n def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create a QLabel with *text* and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to QLabel().\n Returns the created widget.\n \"\"\"\n text = QtGui.QLabel(text, **kargs)\n self.addItem(text, row, col, rowspan, colspan)\n return text\n \n def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create an empty LayoutWidget and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to :func:`LayoutWidget.__init__ <pyqtgraph.LayoutWidget.__init__>`\n Returns the created widget.\n \"\"\"\n layout = LayoutWidget(**kargs)\n self.addItem(layout, row, col, rowspan, colspan)\n return layout\n \n def addWidget(self, item, row=None, col=None, rowspan=1, colspan=1):\n \"\"\"\n Add a widget to the layout and place it in the next available cell (or in the cell specified).\n \"\"\"\n if row == 'next':\n self.nextRow()\n row = self.currentRow\n elif row is None:\n row = self.currentRow\n \n \n if 
col is None:\n col = self.nextCol(colspan)\n \n if row not in self.rows:\n self.rows[row] = {}\n self.rows[row][col] = item\n self.items[item] = (row, col)\n \n self.layout.addWidget(item, row, col, rowspan, colspan)\n\n def getWidget(self, row, col):\n \"\"\"Return the widget in (*row*, *col*)\"\"\"\n return self.row[row][col]\n\n #def itemIndex(self, item):\n #for i in range(self.layout.count()):\n #if self.layout.itemAt(i).graphicsItem() is item:\n #return i\n #raise Exception(\"Could not determine index of item \" + str(item))\n \n #def removeItem(self, item):\n #\"\"\"Remove *item* from the layout.\"\"\"\n #ind = self.itemIndex(item)\n #self.layout.removeAt(ind)\n #self.scene().removeItem(item)\n #r,c = self.items[item]\n #del self.items[item]\n #del self.rows[r][c]\n #self.update()\n \n #def clear(self):\n #items = []\n #for i in list(self.items.keys()):\n #self.removeItem(i)\n\n\n", "path": "pyqtgraph/widgets/LayoutWidget.py"}]}
1,696
213
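For the pyqtgraph record above, the one-word fix is that `addLabel` and `addLayout` must delegate to `addWidget`, the only placement method `LayoutWidget` actually defines. The Qt-free stand-in below (hypothetical class and method names, not pyqtgraph code) shows that delegation pattern; calling a method that does not exist is exactly the `AttributeError` quoted in the report.

```python
class MiniLayout:
    """Toy grid layout used only to illustrate the delegation pattern."""

    def __init__(self):
        self.cells = {}        # (row, col) -> item
        self.current_row = 0
        self.current_col = 0

    def add_widget(self, item, row=None, col=None):
        row = self.current_row if row is None else row
        if col is None:
            col = self.current_col
            self.current_col += 1
        self.cells[(row, col)] = item
        return item

    def add_label(self, text, row=None, col=None):
        # Convenience helpers route through add_widget(); there is no
        # separate item-placement method to call, mirroring the missing
        # addItem in the original traceback.
        return self.add_widget(f"QLabel({text!r})", row, col)


layout = MiniLayout()
layout.add_label("hello")
layout.add_label("world")
print(layout.cells)
# {(0, 0): "QLabel('hello')", (0, 1): "QLabel('world')"}
```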
gh_patches_debug_13509
rasdani/github-patches
git_diff
UTNkar__moore-512
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix navigation bar on laptop screens On utn.se there are many items in the menu. On laptop screens (1366x768 pixels) the navigation bar splits into two rows which makes it unusable. To solve this, make the navigation bar scrollable if it's width gets to long. That way it will still be usable. This is done by setting `scroll: auto` on the container. However, the navigation bar is built with the materialize library which adds a lot of css to the navigation bar. To get around this, we have to use `<div>` instead of `<nav>` and change the name of the class for the navigation bar </issue> <code> [start of src/branding/templatetags/branding_tags.py] 1 from django import template 2 3 from branding.models import Logo, SocialMediaSettings, FooterSettings 4 from wagtail.core.models import Site 5 6 register = template.Library() 7 8 9 @register.inclusion_tag('branding/tags/footer.html', takes_context=True) 10 def custom_footer(context): 11 request = context['request'] 12 site = Site.find_for_request(request) 13 return { 14 'settings': FooterSettings.for_site(site) 15 } 16 17 18 @register.inclusion_tag('branding/tags/structure_header.html', 19 takes_context=True) 20 def structure_header(context, logo_color=''): 21 request = context['request'] 22 site = Site.find_for_request(request) 23 logos = Logo.objects.exclude(belongs_to=site).all() 24 committees = logos.filter(category='committee') 25 sections = logos.filter(category='section') 26 return { 27 'committees': committees, 28 'sections': sections, 29 'color': logo_color, 30 } 31 32 33 @register.inclusion_tag('branding/tags/social_media.html', takes_context=True) 34 def social_media(context, dark=False): 35 request = context['request'] 36 site = Site.find_for_request(request) 37 return { 38 'settings': SocialMediaSettings.for_site(site), 39 'dark': dark, 40 } 41 [end of src/branding/templatetags/branding_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/branding/templatetags/branding_tags.py b/src/branding/templatetags/branding_tags.py --- a/src/branding/templatetags/branding_tags.py +++ b/src/branding/templatetags/branding_tags.py @@ -23,10 +23,19 @@ logos = Logo.objects.exclude(belongs_to=site).all() committees = logos.filter(category='committee') sections = logos.filter(category='section') + + committees_left = committees[:len(committees) // 2] + committees_right = committees[len(committees) // 2:] + + sections_left = sections[:len(sections) // 2] + sections_right = sections[len(sections) // 2:] + return { - 'committees': committees, - 'sections': sections, 'color': logo_color, + 'committees_left': committees_left, + 'committees_right': committees_right, + 'sections_left': sections_left, + 'sections_right': sections_right, }
{"golden_diff": "diff --git a/src/branding/templatetags/branding_tags.py b/src/branding/templatetags/branding_tags.py\n--- a/src/branding/templatetags/branding_tags.py\n+++ b/src/branding/templatetags/branding_tags.py\n@@ -23,10 +23,19 @@\n logos = Logo.objects.exclude(belongs_to=site).all()\n committees = logos.filter(category='committee')\n sections = logos.filter(category='section')\n+\n+ committees_left = committees[:len(committees) // 2]\n+ committees_right = committees[len(committees) // 2:]\n+\n+ sections_left = sections[:len(sections) // 2]\n+ sections_right = sections[len(sections) // 2:]\n+\n return {\n- 'committees': committees,\n- 'sections': sections,\n 'color': logo_color,\n+ 'committees_left': committees_left,\n+ 'committees_right': committees_right,\n+ 'sections_left': sections_left,\n+ 'sections_right': sections_right,\n }\n", "issue": "Fix navigation bar on laptop screens\nOn utn.se there are many items in the menu. On laptop screens (1366x768 pixels) the navigation bar splits into two rows which makes it unusable. To solve this, make the navigation bar scrollable if it's width gets to long. That way it will still be usable. This is done by setting `scroll: auto` on the container.\r\n\r\nHowever, the navigation bar is built with the materialize library which adds a lot of css to the navigation bar. To get around this, we have to use `<div>` instead of `<nav>` and change the name of the class for the navigation bar\n", "before_files": [{"content": "from django import template\n\nfrom branding.models import Logo, SocialMediaSettings, FooterSettings\nfrom wagtail.core.models import Site\n\nregister = template.Library()\n\n\[email protected]_tag('branding/tags/footer.html', takes_context=True)\ndef custom_footer(context):\n request = context['request']\n site = Site.find_for_request(request)\n return {\n 'settings': FooterSettings.for_site(site)\n }\n\n\[email protected]_tag('branding/tags/structure_header.html',\n takes_context=True)\ndef structure_header(context, logo_color=''):\n request = context['request']\n site = Site.find_for_request(request)\n logos = Logo.objects.exclude(belongs_to=site).all()\n committees = logos.filter(category='committee')\n sections = logos.filter(category='section')\n return {\n 'committees': committees,\n 'sections': sections,\n 'color': logo_color,\n }\n\n\[email protected]_tag('branding/tags/social_media.html', takes_context=True)\ndef social_media(context, dark=False):\n request = context['request']\n site = Site.find_for_request(request)\n return {\n 'settings': SocialMediaSettings.for_site(site),\n 'dark': dark,\n }\n", "path": "src/branding/templatetags/branding_tags.py"}]}
1,020
242
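Note that for the UTNkar/moore record above, the merged diff does not implement the scrollable-container approach the issue proposes; instead it halves the committee and section logo lists so the template can render two shorter columns. The snippet below only illustrates that splitting idiom on plain lists (the accepted patch slices Django querysets the same way).

```python
def split_in_half(items):
    """Return (first_half, second_half); the extra element goes right."""
    mid = len(items) // 2
    return items[:mid], items[mid:]

committees = ["logo1", "logo2", "logo3", "logo4", "logo5"]
left, right = split_in_half(committees)
print(left)   # ['logo1', 'logo2']
print(right)  # ['logo3', 'logo4', 'logo5']
```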
gh_patches_debug_8790
rasdani/github-patches
git_diff
docker__docker-py-178
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Installed through dependencies "websocket" library doesn't work with Python 3 During `pip install docker-py` with Python 3 I'm getting following error: ``` Running setup.py install for websocket-client changing mode of build/scripts-3.3/wsdump.py from 644 to 755 File "/home/bob/env/lib/python3.3/site-packages/websocket.py", line 769 except Exception, e: ^ SyntaxError: invalid syntax ``` Looks like this error is ignored and actually docker finishes it's installation and works afterwards, but if docker-py uses websocket library it's usage will fail. According to websocket docs they have separate branch for Python 3: https://github.com/liris/websocket-client/tree/py3 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import os 3 from setuptools import setup 4 5 ROOT_DIR = os.path.dirname(__file__) 6 SOURCE_DIR = os.path.join(ROOT_DIR) 7 8 test_requirements = [] 9 with open('./requirements.txt') as requirements_txt: 10 requirements = [line for line in requirements_txt] 11 12 setup( 13 name="docker-py", 14 version='0.3.0', 15 description="Python client for Docker.", 16 packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils'], 17 install_requires=requirements + test_requirements, 18 zip_safe=False, 19 test_suite='tests', 20 classifiers=[ 21 'Development Status :: 4 - Beta', 22 'Environment :: Other Environment', 23 'Intended Audience :: Developers', 24 'Operating System :: OS Independent', 25 'Programming Language :: Python', 26 'Programming Language :: Python :: 2.6', 27 'Programming Language :: Python :: 2.7', 28 'Programming Language :: Python :: 3.2', 29 'Programming Language :: Python :: 3.3', 30 'Topic :: Utilities', 31 'License :: OSI Approved :: Apache Software License', 32 ], 33 ) 34 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,12 +1,18 @@ #!/usr/bin/env python import os +import sys from setuptools import setup ROOT_DIR = os.path.dirname(__file__) SOURCE_DIR = os.path.join(ROOT_DIR) +if sys.version_info[0] == 3: + requirements_file = './requirements3.txt' +else: + requirements_file = './requirements.txt' + test_requirements = [] -with open('./requirements.txt') as requirements_txt: +with open(requirements_file) as requirements_txt: requirements = [line for line in requirements_txt] setup(
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,12 +1,18 @@\n #!/usr/bin/env python\n import os\n+import sys\n from setuptools import setup\n \n ROOT_DIR = os.path.dirname(__file__)\n SOURCE_DIR = os.path.join(ROOT_DIR)\n \n+if sys.version_info[0] == 3:\n+ requirements_file = './requirements3.txt'\n+else:\n+ requirements_file = './requirements.txt'\n+\n test_requirements = []\n-with open('./requirements.txt') as requirements_txt:\n+with open(requirements_file) as requirements_txt:\n requirements = [line for line in requirements_txt]\n \n setup(\n", "issue": "Installed through dependencies \"websocket\" library doesn't work with Python 3\nDuring `pip install docker-py` with Python 3 I'm getting following error:\n\n```\n Running setup.py install for websocket-client\n changing mode of build/scripts-3.3/wsdump.py from 644 to 755\n File \"/home/bob/env/lib/python3.3/site-packages/websocket.py\", line 769\n except Exception, e:\n ^\n SyntaxError: invalid syntax\n```\n\nLooks like this error is ignored and actually docker finishes it's installation and works afterwards, but if docker-py uses websocket library it's usage will fail.\n\nAccording to websocket docs they have separate branch for Python 3: https://github.com/liris/websocket-client/tree/py3\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\ntest_requirements = []\nwith open('./requirements.txt') as requirements_txt:\n requirements = [line for line in requirements_txt]\n\nsetup(\n name=\"docker-py\",\n version='0.3.0',\n description=\"Python client for Docker.\",\n packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils'],\n install_requires=requirements + test_requirements,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py"}]}
993
145
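The docker-py record above resolves the Python 3 failure by reading a separate requirements file under Python 3; the patch assumes a `requirements3.txt` that pins a py3-compatible `websocket-client`. A self-contained sketch of that selection logic:

```python
import sys

def pick_requirements_file(version_info=sys.version_info):
    """Mirror the interpreter-version gate the patch adds to setup.py."""
    return "./requirements3.txt" if version_info[0] == 3 else "./requirements.txt"

print(pick_requirements_file())                      # './requirements3.txt' on Python 3
print(pick_requirements_file(version_info=(2, 7)))   # './requirements.txt'
```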
gh_patches_debug_38573
rasdani/github-patches
git_diff
facebookresearch__fairseq-5149
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> MMS Multiple voice inputs problem When I use the following command: python examples/mms/asr/infer/mms_infer.py --model "/path/to/asr/model" --lang lang_code --audio "/path/to/audio_1.wav" "/path/to/audio_1.wav" The result is disorder. How to correspond one by one. </issue> <code> [start of examples/mms/asr/infer/mms_infer.py] 1 #!/usr/bin/env python -u 2 # Copyright (c) Facebook, Inc. and its affiliates. 3 # 4 # This source code is licensed under the MIT license found in the 5 # LICENSE file in the root directory of this source tree. 6 7 import argparse 8 import soundfile as sf 9 import tempfile 10 from pathlib import Path 11 import os 12 import subprocess 13 import sys 14 import re 15 16 def parser(): 17 parser = argparse.ArgumentParser(description="ASR inference script for MMS model") 18 parser.add_argument("--model", type=str, help="path to ASR model", required=True) 19 parser.add_argument("--audio", type=str, help="path to audio file", required=True, nargs='+') 20 parser.add_argument("--lang", type=str, help="audio language", required=True) 21 parser.add_argument("--format", type=str, choices=["none", "letter"], default="letter") 22 return parser.parse_args() 23 24 def process(args): 25 with tempfile.TemporaryDirectory() as tmpdir: 26 print(">>> preparing tmp manifest dir ...", file=sys.stderr) 27 tmpdir = Path(tmpdir) 28 with open(tmpdir / "dev.tsv", "w") as fw: 29 fw.write("/\n") 30 for audio in args.audio: 31 nsample = sf.SoundFile(audio).frames 32 fw.write(f"{audio}\t{nsample}\n") 33 with open(tmpdir / "dev.uid", "w") as fw: 34 fw.write(f"{audio}\n"*len(args.audio)) 35 with open(tmpdir / "dev.ltr", "w") as fw: 36 fw.write("d u m m y | d u m m y\n"*len(args.audio)) 37 with open(tmpdir / "dev.wrd", "w") as fw: 38 fw.write("dummy dummy\n"*len(args.audio)) 39 cmd = f""" 40 PYTHONPATH=. PREFIX=INFER HYDRA_FULL_ERROR=1 python examples/speech_recognition/new/infer.py -m --config-dir examples/mms/asr/config/ --config-name infer_common decoding.type=viterbi dataset.max_tokens=4000000 distributed_training.distributed_world_size=1 "common_eval.path='{args.model}'" task.data={tmpdir} dataset.gen_subset="{args.lang}:dev" common_eval.post_process={args.format} decoding.results_path={tmpdir} 41 """ 42 print(">>> loading model & running inference ...", file=sys.stderr) 43 subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL,) 44 with open(tmpdir/"hypo.word") as fr: 45 for ii, hypo in enumerate(fr): 46 hypo = re.sub("\(\S+\)$", "", hypo).strip() 47 print(f'===============\nInput: {args.audio[ii]}\nOutput: {hypo}') 48 49 50 if __name__ == "__main__": 51 args = parser() 52 process(args) 53 [end of examples/mms/asr/infer/mms_infer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/mms/asr/infer/mms_infer.py b/examples/mms/asr/infer/mms_infer.py --- a/examples/mms/asr/infer/mms_infer.py +++ b/examples/mms/asr/infer/mms_infer.py @@ -21,28 +21,38 @@ parser.add_argument("--format", type=str, choices=["none", "letter"], default="letter") return parser.parse_args() +def reorder_decode(hypos): + outputs = [] + for hypo in hypos: + idx = int(re.findall("\(None-(\d+)\)$", hypo)[0]) + hypo = re.sub("\(\S+\)$", "", hypo).strip() + outputs.append((idx, hypo)) + outputs = sorted(outputs) + return outputs + def process(args): with tempfile.TemporaryDirectory() as tmpdir: print(">>> preparing tmp manifest dir ...", file=sys.stderr) tmpdir = Path(tmpdir) - with open(tmpdir / "dev.tsv", "w") as fw: + with open(tmpdir / "dev.tsv", "w") as fw, open(tmpdir / "dev.uid", "w") as fu: fw.write("/\n") for audio in args.audio: nsample = sf.SoundFile(audio).frames fw.write(f"{audio}\t{nsample}\n") - with open(tmpdir / "dev.uid", "w") as fw: - fw.write(f"{audio}\n"*len(args.audio)) + fu.write(f"{audio}\n") with open(tmpdir / "dev.ltr", "w") as fw: - fw.write("d u m m y | d u m m y\n"*len(args.audio)) + fw.write("d u m m y | d u m m y |\n"*len(args.audio)) with open(tmpdir / "dev.wrd", "w") as fw: fw.write("dummy dummy\n"*len(args.audio)) cmd = f""" - PYTHONPATH=. PREFIX=INFER HYDRA_FULL_ERROR=1 python examples/speech_recognition/new/infer.py -m --config-dir examples/mms/asr/config/ --config-name infer_common decoding.type=viterbi dataset.max_tokens=4000000 distributed_training.distributed_world_size=1 "common_eval.path='{args.model}'" task.data={tmpdir} dataset.gen_subset="{args.lang}:dev" common_eval.post_process={args.format} decoding.results_path={tmpdir} + PYTHONPATH=. PREFIX=INFER HYDRA_FULL_ERROR=1 python examples/speech_recognition/new/infer.py -m --config-dir examples/mms/asr/config/ --config-name infer_common decoding.type=viterbi dataset.max_tokens=1440000 distributed_training.distributed_world_size=1 "common_eval.path='{args.model}'" task.data={tmpdir} dataset.gen_subset="{args.lang}:dev" common_eval.post_process={args.format} decoding.results_path={tmpdir} """ print(">>> loading model & running inference ...", file=sys.stderr) subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL,) with open(tmpdir/"hypo.word") as fr: - for ii, hypo in enumerate(fr): + hypos = fr.readlines() + outputs = reorder_decode(hypos) + for ii, hypo in outputs: hypo = re.sub("\(\S+\)$", "", hypo).strip() print(f'===============\nInput: {args.audio[ii]}\nOutput: {hypo}')
{"golden_diff": "diff --git a/examples/mms/asr/infer/mms_infer.py b/examples/mms/asr/infer/mms_infer.py\n--- a/examples/mms/asr/infer/mms_infer.py\n+++ b/examples/mms/asr/infer/mms_infer.py\n@@ -21,28 +21,38 @@\n parser.add_argument(\"--format\", type=str, choices=[\"none\", \"letter\"], default=\"letter\")\n return parser.parse_args()\n \n+def reorder_decode(hypos):\n+ outputs = []\n+ for hypo in hypos:\n+ idx = int(re.findall(\"\\(None-(\\d+)\\)$\", hypo)[0])\n+ hypo = re.sub(\"\\(\\S+\\)$\", \"\", hypo).strip()\n+ outputs.append((idx, hypo))\n+ outputs = sorted(outputs)\n+ return outputs\n+\n def process(args): \n with tempfile.TemporaryDirectory() as tmpdir:\n print(\">>> preparing tmp manifest dir ...\", file=sys.stderr)\n tmpdir = Path(tmpdir)\n- with open(tmpdir / \"dev.tsv\", \"w\") as fw:\n+ with open(tmpdir / \"dev.tsv\", \"w\") as fw, open(tmpdir / \"dev.uid\", \"w\") as fu:\n fw.write(\"/\\n\")\n for audio in args.audio:\n nsample = sf.SoundFile(audio).frames\n fw.write(f\"{audio}\\t{nsample}\\n\")\n- with open(tmpdir / \"dev.uid\", \"w\") as fw:\n- fw.write(f\"{audio}\\n\"*len(args.audio))\n+ fu.write(f\"{audio}\\n\")\n with open(tmpdir / \"dev.ltr\", \"w\") as fw:\n- fw.write(\"d u m m y | d u m m y\\n\"*len(args.audio))\n+ fw.write(\"d u m m y | d u m m y |\\n\"*len(args.audio))\n with open(tmpdir / \"dev.wrd\", \"w\") as fw:\n fw.write(\"dummy dummy\\n\"*len(args.audio))\n cmd = f\"\"\"\n- PYTHONPATH=. PREFIX=INFER HYDRA_FULL_ERROR=1 python examples/speech_recognition/new/infer.py -m --config-dir examples/mms/asr/config/ --config-name infer_common decoding.type=viterbi dataset.max_tokens=4000000 distributed_training.distributed_world_size=1 \"common_eval.path='{args.model}'\" task.data={tmpdir} dataset.gen_subset=\"{args.lang}:dev\" common_eval.post_process={args.format} decoding.results_path={tmpdir}\n+ PYTHONPATH=. PREFIX=INFER HYDRA_FULL_ERROR=1 python examples/speech_recognition/new/infer.py -m --config-dir examples/mms/asr/config/ --config-name infer_common decoding.type=viterbi dataset.max_tokens=1440000 distributed_training.distributed_world_size=1 \"common_eval.path='{args.model}'\" task.data={tmpdir} dataset.gen_subset=\"{args.lang}:dev\" common_eval.post_process={args.format} decoding.results_path={tmpdir}\n \"\"\"\n print(\">>> loading model & running inference ...\", file=sys.stderr)\n subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL,)\n with open(tmpdir/\"hypo.word\") as fr:\n- for ii, hypo in enumerate(fr):\n+ hypos = fr.readlines()\n+ outputs = reorder_decode(hypos)\n+ for ii, hypo in outputs:\n hypo = re.sub(\"\\(\\S+\\)$\", \"\", hypo).strip()\n print(f'===============\\nInput: {args.audio[ii]}\\nOutput: {hypo}')\n", "issue": "MMS Multiple voice inputs problem\nWhen I use the following command\uff1a\r\npython examples/mms/asr/infer/mms_infer.py --model \"/path/to/asr/model\" --lang lang_code --audio \"/path/to/audio_1.wav\" \"/path/to/audio_1.wav\"\r\nThe result is disorder. How to correspond one by one.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python -u\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport soundfile as sf\nimport tempfile\nfrom pathlib import Path\nimport os\nimport subprocess\nimport sys\nimport re\n\ndef parser():\n parser = argparse.ArgumentParser(description=\"ASR inference script for MMS model\")\n parser.add_argument(\"--model\", type=str, help=\"path to ASR model\", required=True)\n parser.add_argument(\"--audio\", type=str, help=\"path to audio file\", required=True, nargs='+')\n parser.add_argument(\"--lang\", type=str, help=\"audio language\", required=True)\n parser.add_argument(\"--format\", type=str, choices=[\"none\", \"letter\"], default=\"letter\")\n return parser.parse_args()\n\ndef process(args): \n with tempfile.TemporaryDirectory() as tmpdir:\n print(\">>> preparing tmp manifest dir ...\", file=sys.stderr)\n tmpdir = Path(tmpdir)\n with open(tmpdir / \"dev.tsv\", \"w\") as fw:\n fw.write(\"/\\n\")\n for audio in args.audio:\n nsample = sf.SoundFile(audio).frames\n fw.write(f\"{audio}\\t{nsample}\\n\")\n with open(tmpdir / \"dev.uid\", \"w\") as fw:\n fw.write(f\"{audio}\\n\"*len(args.audio))\n with open(tmpdir / \"dev.ltr\", \"w\") as fw:\n fw.write(\"d u m m y | d u m m y\\n\"*len(args.audio))\n with open(tmpdir / \"dev.wrd\", \"w\") as fw:\n fw.write(\"dummy dummy\\n\"*len(args.audio))\n cmd = f\"\"\"\n PYTHONPATH=. PREFIX=INFER HYDRA_FULL_ERROR=1 python examples/speech_recognition/new/infer.py -m --config-dir examples/mms/asr/config/ --config-name infer_common decoding.type=viterbi dataset.max_tokens=4000000 distributed_training.distributed_world_size=1 \"common_eval.path='{args.model}'\" task.data={tmpdir} dataset.gen_subset=\"{args.lang}:dev\" common_eval.post_process={args.format} decoding.results_path={tmpdir}\n \"\"\"\n print(\">>> loading model & running inference ...\", file=sys.stderr)\n subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL,)\n with open(tmpdir/\"hypo.word\") as fr:\n for ii, hypo in enumerate(fr):\n hypo = re.sub(\"\\(\\S+\\)$\", \"\", hypo).strip()\n print(f'===============\\nInput: {args.audio[ii]}\\nOutput: {hypo}')\n\n\nif __name__ == \"__main__\":\n args = parser()\n process(args)\n", "path": "examples/mms/asr/infer/mms_infer.py"}]}
1,318
775
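For the fairseq MMS record above, the core of the fix is re-associating each hypothesis with its input file: infer.py tags every output line with a "(None-<idx>)" suffix, and the hypotheses come back in a different order than the audio arguments (presumably because of batching), so they must be sorted by that index before printing. A standalone version of the helper the patch adds:

```python
import re

def reorder_decode(hypos):
    """Sort hypotheses by the utterance index in their "(None-<idx>)" tag."""
    outputs = []
    for hypo in hypos:
        idx = int(re.findall(r"\(None-(\d+)\)$", hypo.strip())[0])
        text = re.sub(r"\(\S+\)$", "", hypo).strip()
        outputs.append((idx, text))
    return sorted(outputs)

hypos = ["second utterance (None-1)", "first utterance (None-0)"]
for idx, text in reorder_decode(hypos):
    print(idx, text)
# 0 first utterance
# 1 second utterance
```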
gh_patches_debug_26644
rasdani/github-patches
git_diff
Kinto__kinto-293
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cliquet.storage.exceptions.BackendError: ConnectionError: Error 8 connecting to localhos:6379 The config wizard has typos: https://github.com/Kinto/kinto/blob/master/kinto/config/__init__.py#L39 https://github.com/Kinto/kinto/blob/master/kinto/config/__init__.py#L41 https://github.com/Kinto/kinto/blob/master/kinto/config/__init__.py#L43 I am not sure how, but maybe we could have a way to test that the produced default config works </issue> <code> [start of kinto/config/__init__.py] 1 import os 2 import binascii 3 import codecs 4 from six.moves import input 5 6 HERE = os.path.abspath(os.path.dirname(__file__)) 7 8 9 def render_template(template, destination, **kwargs): 10 template = os.path.join(HERE, template) 11 12 with codecs.open(template, 'r', encoding='utf-8') as f: 13 raw_template = f.read() 14 rendered = raw_template.format(**kwargs) 15 with codecs.open(destination, 'w+', encoding='utf-8') as output: 16 output.write(rendered) 17 18 19 def init(config_file): 20 values = {} 21 values['secret'] = binascii.b2a_hex(os.urandom(32)) 22 23 backend = input("Which backend to use? " 24 "(1 - postgresql, 2 - redis, default - memory) ").strip() 25 26 if backend == '1': 27 # Postgresql configuration 28 postgresql_url = "postgres://postgres:postgres@localhost/postgres" 29 values['storage_backend'] = "cliquet.storage.postgresql" 30 values['storage_url'] = postgresql_url 31 values['cache_backend'] = "cliquet.cache.postgresql" 32 values['cache_url'] = postgresql_url 33 values['permission_backend'] = "cliquet.permission.postgresql" 34 values['permission_url'] = postgresql_url 35 36 elif backend == '2': 37 # Redis configuration 38 values['storage_backend'] = "cliquet.storage.redis" 39 values['storage_url'] = "redis://localhos:6379/1" 40 values['cache_backend'] = "cliquet.cache.redis" 41 values['cache_url'] = "redis://localhos:6379/2" 42 values['permission_backend'] = "cliquet.permission.redis" 43 values['permission_url'] = "redis://localhos:6379/3" 44 45 else: 46 # Memory configuration / default backend 47 values['storage_backend'] = "cliquet.storage.memory" 48 values['storage_url'] = "" 49 values['cache_backend'] = "cliquet.cache.memory" 50 values['cache_url'] = "" 51 values['permission_backend'] = "cliquet.permission.memory" 52 values['permission_url'] = "" 53 54 render_template("kinto.tpl", config_file, 55 secret=values['secret'], 56 storage_backend=values['storage_backend'], 57 storage_url=values['storage_url'], 58 cache_backend=values['cache_backend'], 59 cache_url=values['cache_url'], 60 permission_backend=values['permission_backend'], 61 permission_url=values['permission_url']) 62 [end of kinto/config/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py --- a/kinto/config/__init__.py +++ b/kinto/config/__init__.py @@ -36,11 +36,11 @@ elif backend == '2': # Redis configuration values['storage_backend'] = "cliquet.storage.redis" - values['storage_url'] = "redis://localhos:6379/1" + values['storage_url'] = "redis://localhost:6379/1" values['cache_backend'] = "cliquet.cache.redis" - values['cache_url'] = "redis://localhos:6379/2" + values['cache_url'] = "redis://localhost:6379/2" values['permission_backend'] = "cliquet.permission.redis" - values['permission_url'] = "redis://localhos:6379/3" + values['permission_url'] = "redis://localhost:6379/3" else: # Memory configuration / default backend @@ -51,11 +51,4 @@ values['permission_backend'] = "cliquet.permission.memory" values['permission_url'] = "" - render_template("kinto.tpl", config_file, - secret=values['secret'], - storage_backend=values['storage_backend'], - storage_url=values['storage_url'], - cache_backend=values['cache_backend'], - cache_url=values['cache_url'], - permission_backend=values['permission_backend'], - permission_url=values['permission_url']) + render_template("kinto.tpl", config_file, **values)
{"golden_diff": "diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -36,11 +36,11 @@\n elif backend == '2':\n # Redis configuration\n values['storage_backend'] = \"cliquet.storage.redis\"\n- values['storage_url'] = \"redis://localhos:6379/1\"\n+ values['storage_url'] = \"redis://localhost:6379/1\"\n values['cache_backend'] = \"cliquet.cache.redis\"\n- values['cache_url'] = \"redis://localhos:6379/2\"\n+ values['cache_url'] = \"redis://localhost:6379/2\"\n values['permission_backend'] = \"cliquet.permission.redis\"\n- values['permission_url'] = \"redis://localhos:6379/3\"\n+ values['permission_url'] = \"redis://localhost:6379/3\"\n \n else:\n # Memory configuration / default backend\n@@ -51,11 +51,4 @@\n values['permission_backend'] = \"cliquet.permission.memory\"\n values['permission_url'] = \"\"\n \n- render_template(\"kinto.tpl\", config_file,\n- secret=values['secret'],\n- storage_backend=values['storage_backend'],\n- storage_url=values['storage_url'],\n- cache_backend=values['cache_backend'],\n- cache_url=values['cache_url'],\n- permission_backend=values['permission_backend'],\n- permission_url=values['permission_url'])\n+ render_template(\"kinto.tpl\", config_file, **values)\n", "issue": "cliquet.storage.exceptions.BackendError: ConnectionError: Error 8 connecting to localhos:6379\nThe config wizard has typos:\n\nhttps://github.com/Kinto/kinto/blob/master/kinto/config/__init__.py#L39\nhttps://github.com/Kinto/kinto/blob/master/kinto/config/__init__.py#L41\nhttps://github.com/Kinto/kinto/blob/master/kinto/config/__init__.py#L43\n\nI am not sure how, but maybe we could have a way to test that the produced default config works\n\n", "before_files": [{"content": "import os\nimport binascii\nimport codecs\nfrom six.moves import input\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file):\n values = {}\n values['secret'] = binascii.b2a_hex(os.urandom(32))\n\n backend = input(\"Which backend to use? 
\"\n \"(1 - postgresql, 2 - redis, default - memory) \").strip()\n\n if backend == '1':\n # Postgresql configuration\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_backend'] = \"cliquet.storage.postgresql\"\n values['storage_url'] = postgresql_url\n values['cache_backend'] = \"cliquet.cache.postgresql\"\n values['cache_url'] = postgresql_url\n values['permission_backend'] = \"cliquet.permission.postgresql\"\n values['permission_url'] = postgresql_url\n\n elif backend == '2':\n # Redis configuration\n values['storage_backend'] = \"cliquet.storage.redis\"\n values['storage_url'] = \"redis://localhos:6379/1\"\n values['cache_backend'] = \"cliquet.cache.redis\"\n values['cache_url'] = \"redis://localhos:6379/2\"\n values['permission_backend'] = \"cliquet.permission.redis\"\n values['permission_url'] = \"redis://localhos:6379/3\"\n\n else:\n # Memory configuration / default backend\n values['storage_backend'] = \"cliquet.storage.memory\"\n values['storage_url'] = \"\"\n values['cache_backend'] = \"cliquet.cache.memory\"\n values['cache_url'] = \"\"\n values['permission_backend'] = \"cliquet.permission.memory\"\n values['permission_url'] = \"\"\n\n render_template(\"kinto.tpl\", config_file,\n secret=values['secret'],\n storage_backend=values['storage_backend'],\n storage_url=values['storage_url'],\n cache_backend=values['cache_backend'],\n cache_url=values['cache_url'],\n permission_backend=values['permission_backend'],\n permission_url=values['permission_url'])\n", "path": "kinto/config/__init__.py"}]}
1,320
375
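The kinto record above fixes the "localhos" typos and collapses the `render_template` call to `**values`. The issue also asks for "a way to test that the produced default config works"; a hypothetical sanity check along those lines (not part of the accepted patch) could simply parse each backend URL and reject unexpected hosts:

```python
from urllib.parse import urlparse

def check_backend_urls(values, allowed_hosts=("localhost",)):
    problems = []
    for key in ("storage_url", "cache_url", "permission_url"):
        url = values.get(key, "")
        if not url:  # memory backends use an empty URL
            continue
        host = urlparse(url).hostname
        if host not in allowed_hosts:
            problems.append(f"{key}: unexpected host {host!r} in {url!r}")
    return problems

values = {
    "storage_url": "redis://localhos:6379/1",  # the typo from the report
    "cache_url": "redis://localhost:6379/2",
    "permission_url": "",
}
print(check_backend_urls(values))
# ["storage_url: unexpected host 'localhos' in 'redis://localhos:6379/1'"]
```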
gh_patches_debug_518
rasdani/github-patches
git_diff
feast-dev__feast-1946
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Allow plugin repos to test against universal test suite **Is your feature request related to a problem? Please describe.** There are several plugin repos for custom connectors (Hive, Azure, Snowflake, etc.), and there is increasing interest from the community in contributing plugins. One blocker for many folks is that there is no easy way to test their custom connector against our universal test suite. Someone working on a plugin repo should be able to test their connector against the universal test suite with minimal changes in their repo. **Describe the solution you'd like** The Feast team has come up with two solutions. The first solution is a temporary fix to unblock folks who wish to start testing immediately. We recommend that you add `feast` as a [git submodule](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your plugin repo, and then install `feast` in editable mode by navigating to `feast` and running `pip install -e sdk/python/[ci]` as detailed [here](https://github.com/feast-dev/feast/blob/master/CONTRIBUTING.md). This will allow you to `import feast`, and will also allow you to run our test suite with `pytest`. For example, in `feast` you should be able to run `make test`, and all unit tests should succeed. In order to run the full suite of integration tests with your custom connector, all you need to do is modify `FULL_REPO_CONFIGS` in `sdk/python/tests/integration/feature_repos/repo_configuration.py`. Most of our integration tests rely on pytest fixtures defined in `conftest.py`, most of which are parametrized based on `FULL_REPO_CONFIGS`. The main thing you will need to do in order to overwrite `FULL_REPO_CONFIGS` is to write a `DataSourceCreator`. We consider this solution a temporary fix because it still requires that the user to modify the `feast` repo directly, even if it's in a git submodule. The second solution, which extends the first solution to be more viable in the long-term, will be to allow users to overwrite `FULL_REPO_CONFIGS` through an environment variable. This means that after adding `feast` as a git submodule, users should be able to directly run integration tests without ever needing to modify the `feast` repo. We intend to build this functionality out eventually, but are currently working on several other higher-priority features. If anyone in the community wants to take this on, that would be great! **Describe alternatives you've considered** N/A **Additional context** Add any other context or screenshots about the feature request here. </issue> <code> [start of sdk/python/feast/constants.py] 1 # 2 # Copyright 2019 The Feast Authors 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # https://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 
15 # 16 17 # Maximum interval(secs) to wait between retries for retry function 18 MAX_WAIT_INTERVAL: str = "60" 19 20 AWS_LAMBDA_FEATURE_SERVER_IMAGE = "feastdev/feature-server:aws" 21 22 # feature_store.yaml environment variable name for remote feature server 23 FEATURE_STORE_YAML_ENV_NAME: str = "FEATURE_STORE_YAML_BASE64" 24 25 # Environment variable for toggling usage 26 FEAST_USAGE = "FEAST_USAGE" 27 [end of sdk/python/feast/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/feast/constants.py b/sdk/python/feast/constants.py --- a/sdk/python/feast/constants.py +++ b/sdk/python/feast/constants.py @@ -24,3 +24,6 @@ # Environment variable for toggling usage FEAST_USAGE = "FEAST_USAGE" + +# Environment variable for the path for overwriting universal test configs +FULL_REPO_CONFIGS_MODULE_ENV_NAME: str = "FULL_REPO_CONFIGS_MODULE"
{"golden_diff": "diff --git a/sdk/python/feast/constants.py b/sdk/python/feast/constants.py\n--- a/sdk/python/feast/constants.py\n+++ b/sdk/python/feast/constants.py\n@@ -24,3 +24,6 @@\n \n # Environment variable for toggling usage\n FEAST_USAGE = \"FEAST_USAGE\"\n+\n+# Environment variable for the path for overwriting universal test configs\n+FULL_REPO_CONFIGS_MODULE_ENV_NAME: str = \"FULL_REPO_CONFIGS_MODULE\"\n", "issue": "Allow plugin repos to test against universal test suite\n**Is your feature request related to a problem? Please describe.**\r\nThere are several plugin repos for custom connectors (Hive, Azure, Snowflake, etc.), and there is increasing interest from the community in contributing plugins. One blocker for many folks is that there is no easy way to test their custom connector against our universal test suite. Someone working on a plugin repo should be able to test their connector against the universal test suite with minimal changes in their repo. \r\n\r\n**Describe the solution you'd like**\r\nThe Feast team has come up with two solutions. \r\n\r\nThe first solution is a temporary fix to unblock folks who wish to start testing immediately. We recommend that you add `feast` as a [git submodule](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your plugin repo, and then install `feast` in editable mode by navigating to `feast` and running `pip install -e sdk/python/[ci]` as detailed [here](https://github.com/feast-dev/feast/blob/master/CONTRIBUTING.md). This will allow you to `import feast`, and will also allow you to run our test suite with `pytest`. For example, in `feast` you should be able to run `make test`, and all unit tests should succeed. In order to run the full suite of integration tests with your custom connector, all you need to do is modify `FULL_REPO_CONFIGS` in `sdk/python/tests/integration/feature_repos/repo_configuration.py`. Most of our integration tests rely on pytest fixtures defined in `conftest.py`, most of which are parametrized based on `FULL_REPO_CONFIGS`. The main thing you will need to do in order to overwrite `FULL_REPO_CONFIGS` is to write a `DataSourceCreator`. We consider this solution a temporary fix because it still requires that the user to modify the `feast` repo directly, even if it's in a git submodule.\r\n\r\nThe second solution, which extends the first solution to be more viable in the long-term, will be to allow users to overwrite `FULL_REPO_CONFIGS` through an environment variable. This means that after adding `feast` as a git submodule, users should be able to directly run integration tests without ever needing to modify the `feast` repo. We intend to build this functionality out eventually, but are currently working on several other higher-priority features. 
If anyone in the community wants to take this on, that would be great!\r\n\r\n**Describe alternatives you've considered**\r\nN/A\r\n\r\n**Additional context**\r\nAdd any other context or screenshots about the feature request here.\r\n\n", "before_files": [{"content": "#\n# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Maximum interval(secs) to wait between retries for retry function\nMAX_WAIT_INTERVAL: str = \"60\"\n\nAWS_LAMBDA_FEATURE_SERVER_IMAGE = \"feastdev/feature-server:aws\"\n\n# feature_store.yaml environment variable name for remote feature server\nFEATURE_STORE_YAML_ENV_NAME: str = \"FEATURE_STORE_YAML_BASE64\"\n\n# Environment variable for toggling usage\nFEAST_USAGE = \"FEAST_USAGE\"\n", "path": "sdk/python/feast/constants.py"}]}
1,374
103
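The feast record above only lands the constant `FULL_REPO_CONFIGS_MODULE`; the issue describes the larger goal of letting plugin repos point the universal test suite at their own configs via that environment variable. A sketch, under the assumption that the variable holds a dotted module path exporting `FULL_REPO_CONFIGS` (this is not the code in the patch), of how such an override is typically consumed:

```python
import importlib
import os

FULL_REPO_CONFIGS_MODULE_ENV_NAME = "FULL_REPO_CONFIGS_MODULE"

def load_full_repo_configs(default_configs):
    """Return plugin-provided configs if the env var is set, else the defaults."""
    module_path = os.environ.get(FULL_REPO_CONFIGS_MODULE_ENV_NAME)
    if not module_path:
        return default_configs
    module = importlib.import_module(module_path)
    return getattr(module, "FULL_REPO_CONFIGS")

# With the variable unset this simply falls back to the built-in configs.
print(load_full_repo_configs(default_configs=["local", "redis"]))
```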
gh_patches_debug_50432
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4754
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve unexpected error message Many users are reporting / filling an issue in our issue tracker when this message is shown to them, which is logic because it's what the message says. > There was a problem with Read the Docs while building your documentation. Please report this to us with your build id (1234) Although, I think we should improve this message saying something like "if this problem persists, please report..." or something similar to that. Otherwise, sometimes it's a temporal failure and we get tons of reports. </issue> <code> [start of readthedocs/doc_builder/exceptions.py] 1 # -*- coding: utf-8 -*- 2 """Exceptions raised when building documentation.""" 3 4 from __future__ import division, print_function, unicode_literals 5 6 from django.utils.translation import ugettext_noop 7 8 9 class BuildEnvironmentException(Exception): 10 11 message = None 12 status_code = None 13 14 def __init__(self, message=None, **kwargs): 15 self.status_code = kwargs.pop('status_code', None) or self.status_code or 1 16 message = message or self.get_default_message() 17 super(BuildEnvironmentException, self).__init__(message, **kwargs) 18 19 def get_default_message(self): 20 return self.message 21 22 23 class BuildEnvironmentError(BuildEnvironmentException): 24 25 GENERIC_WITH_BUILD_ID = ugettext_noop( 26 'There was a problem with Read the Docs while building your documentation. ' 27 'Please report this to us with your build id ({build_id}).', 28 ) 29 30 31 class BuildEnvironmentCreationFailed(BuildEnvironmentError): 32 33 message = ugettext_noop('Build environment creation failed') 34 35 36 class VersionLockedError(BuildEnvironmentError): 37 38 message = ugettext_noop('Version locked, retrying in 5 minutes.') 39 status_code = 423 40 41 42 class ProjectBuildsSkippedError(BuildEnvironmentError): 43 44 message = ugettext_noop('Builds for this project are temporarily disabled') 45 46 47 class YAMLParseError(BuildEnvironmentError): 48 49 GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop( 50 'Problem parsing YAML configuration. {exception}', 51 ) 52 53 54 class BuildTimeoutError(BuildEnvironmentError): 55 56 message = ugettext_noop('Build exited due to time out') 57 58 59 class BuildEnvironmentWarning(BuildEnvironmentException): 60 pass 61 [end of readthedocs/doc_builder/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py --- a/readthedocs/doc_builder/exceptions.py +++ b/readthedocs/doc_builder/exceptions.py @@ -24,7 +24,9 @@ GENERIC_WITH_BUILD_ID = ugettext_noop( 'There was a problem with Read the Docs while building your documentation. ' - 'Please report this to us with your build id ({build_id}).', + 'Please try again later. ' + 'However, if this problem persists, ' + 'please report this to us with your build id ({build_id}).', )
{"golden_diff": "diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py\n--- a/readthedocs/doc_builder/exceptions.py\n+++ b/readthedocs/doc_builder/exceptions.py\n@@ -24,7 +24,9 @@\n \n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n- 'Please report this to us with your build id ({build_id}).',\n+ 'Please try again later. '\n+ 'However, if this problem persists, '\n+ 'please report this to us with your build id ({build_id}).',\n )\n", "issue": "Improve unexpected error message\nMany users are reporting / filling an issue in our issue tracker when this message is shown to them, which is logic because it's what the message says.\r\n\r\n> There was a problem with Read the Docs while building your documentation. Please report this to us with your build id (1234)\r\n\r\nAlthough, I think we should improve this message saying something like \"if this problem persists, please report...\" or something similar to that. Otherwise, sometimes it's a temporal failure and we get tons of reports.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Exceptions raised when building documentation.\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals\n\nfrom django.utils.translation import ugettext_noop\n\n\nclass BuildEnvironmentException(Exception):\n\n message = None\n status_code = None\n\n def __init__(self, message=None, **kwargs):\n self.status_code = kwargs.pop('status_code', None) or self.status_code or 1\n message = message or self.get_default_message()\n super(BuildEnvironmentException, self).__init__(message, **kwargs)\n\n def get_default_message(self):\n return self.message\n\n\nclass BuildEnvironmentError(BuildEnvironmentException):\n\n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n 'Please report this to us with your build id ({build_id}).',\n )\n\n\nclass BuildEnvironmentCreationFailed(BuildEnvironmentError):\n\n message = ugettext_noop('Build environment creation failed')\n\n\nclass VersionLockedError(BuildEnvironmentError):\n\n message = ugettext_noop('Version locked, retrying in 5 minutes.')\n status_code = 423\n\n\nclass ProjectBuildsSkippedError(BuildEnvironmentError):\n\n message = ugettext_noop('Builds for this project are temporarily disabled')\n\n\nclass YAMLParseError(BuildEnvironmentError):\n\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing YAML configuration. {exception}',\n )\n\n\nclass BuildTimeoutError(BuildEnvironmentError):\n\n message = ugettext_noop('Build exited due to time out')\n\n\nclass BuildEnvironmentWarning(BuildEnvironmentException):\n pass\n", "path": "readthedocs/doc_builder/exceptions.py"}]}
1,121
141
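For readability, the message constant as it reads after the golden diff above is sketched below; `BuildEnvironmentException` and the rest of the module are unchanged from the `before_files` listing and are abbreviated here.

```python
from django.utils.translation import ugettext_noop


class BuildEnvironmentError(BuildEnvironmentException):

    # Post-patch wording: transient failures now ask the user to retry
    # before filing a report, which is the behaviour requested in the issue.
    GENERIC_WITH_BUILD_ID = ugettext_noop(
        'There was a problem with Read the Docs while building your documentation. '
        'Please try again later. '
        'However, if this problem persists, '
        'please report this to us with your build id ({build_id}).',
    )
```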
gh_patches_debug_15903
rasdani/github-patches
git_diff
pyload__pyload-1093
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ShareOnline PrePaid is not recognized As the title says. A PrePaid ShareOnline Account is not recognized as valid Premium Account and therefore pyload asks for Captchas when trying to download an URL. ![Share Online User Profile](https://cloud.githubusercontent.com/assets/3457747/5946557/f3099568-a736-11e4-891e-11db8404ef96.png) ![pyload](https://cloud.githubusercontent.com/assets/3457747/5946588/49292972-a737-11e4-8040-1bbb692f945d.png) </issue> <code> [start of module/plugins/accounts/ShareonlineBiz.py] 1 # -*- coding: utf-8 -*- 2 3 import re 4 5 from module.plugins.Account import Account 6 7 8 class ShareonlineBiz(Account): 9 __name__ = "ShareonlineBiz" 10 __type__ = "account" 11 __version__ = "0.30" 12 13 __description__ = """Share-online.biz account plugin""" 14 __license__ = "GPLv3" 15 __authors__ = [("Walter Purcaro", "[email protected]")] 16 17 18 def api_response(self, user, req): 19 return req.load("http://api.share-online.biz/cgi-bin", 20 get={'q' : "userdetails", 21 'aux' : "traffic", 22 'username': user, 23 'password': self.getAccountData(user)['password']}) 24 25 26 def loadAccountInfo(self, user, req): 27 premium = False 28 validuntil = None 29 trafficleft = -1 30 maxtraffic = 100 * 1024 * 1024 * 1024 #: 100 GB 31 32 api = {} 33 for line in self.api_response(user, req).splitlines(): 34 if "=" in line: 35 key, value = line.split("=") 36 api[key] = value 37 38 self.logDebug(api) 39 40 if api['a'].lower() != "not_available": 41 req.cj.setCookie("share-online.biz", 'a', api['a']) 42 43 premium = api['group'] == "Premium" 44 45 validuntil = float(api['expire_date']) 46 47 traffic = float(api['traffic_1d'].split(";")[0]) 48 maxtraffic = max(maxtraffic, traffic) 49 trafficleft = maxtraffic - traffic 50 51 maxtraffic /= 1024 #@TODO: Remove `/ 1024` in 0.4.10 52 trafficleft /= 1024 #@TODO: Remove `/ 1024` in 0.4.10 53 54 return {'premium': premium, 'validuntil': validuntil, 'trafficleft': trafficleft, 'maxtraffic': maxtraffic} 55 56 57 def login(self, user, data, req): 58 html = self.api_response(user, req) 59 err = re.search(r'\*\*(.+?)\*\*', html) 60 if err: 61 self.logError(err.group(1)) 62 self.wrongPassword() 63 [end of module/plugins/accounts/ShareonlineBiz.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/module/plugins/accounts/ShareonlineBiz.py b/module/plugins/accounts/ShareonlineBiz.py --- a/module/plugins/accounts/ShareonlineBiz.py +++ b/module/plugins/accounts/ShareonlineBiz.py @@ -8,7 +8,7 @@ class ShareonlineBiz(Account): __name__ = "ShareonlineBiz" __type__ = "account" - __version__ = "0.30" + __version__ = "0.31" __description__ = """Share-online.biz account plugin""" __license__ = "GPLv3" @@ -40,7 +40,7 @@ if api['a'].lower() != "not_available": req.cj.setCookie("share-online.biz", 'a', api['a']) - premium = api['group'] == "Premium" + premium = api['group'] in ["Premium", "PrePaid"] validuntil = float(api['expire_date'])
{"golden_diff": "diff --git a/module/plugins/accounts/ShareonlineBiz.py b/module/plugins/accounts/ShareonlineBiz.py\n--- a/module/plugins/accounts/ShareonlineBiz.py\n+++ b/module/plugins/accounts/ShareonlineBiz.py\n@@ -8,7 +8,7 @@\n class ShareonlineBiz(Account):\n __name__ = \"ShareonlineBiz\"\n __type__ = \"account\"\n- __version__ = \"0.30\"\n+ __version__ = \"0.31\"\n \n __description__ = \"\"\"Share-online.biz account plugin\"\"\"\n __license__ = \"GPLv3\"\n@@ -40,7 +40,7 @@\n if api['a'].lower() != \"not_available\":\n req.cj.setCookie(\"share-online.biz\", 'a', api['a'])\n \n- premium = api['group'] == \"Premium\"\n+ premium = api['group'] in [\"Premium\", \"PrePaid\"]\n \n validuntil = float(api['expire_date'])\n", "issue": "ShareOnline PrePaid is not recognized\nAs the title says. A PrePaid ShareOnline Account is not recognized as valid Premium Account and therefore pyload asks for Captchas when trying to download an URL.\n\n![Share Online User Profile](https://cloud.githubusercontent.com/assets/3457747/5946557/f3099568-a736-11e4-891e-11db8404ef96.png)\n\n![pyload](https://cloud.githubusercontent.com/assets/3457747/5946588/49292972-a737-11e4-8040-1bbb692f945d.png)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom module.plugins.Account import Account\n\n\nclass ShareonlineBiz(Account):\n __name__ = \"ShareonlineBiz\"\n __type__ = \"account\"\n __version__ = \"0.30\"\n\n __description__ = \"\"\"Share-online.biz account plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n\n\n def api_response(self, user, req):\n return req.load(\"http://api.share-online.biz/cgi-bin\",\n get={'q' : \"userdetails\",\n 'aux' : \"traffic\",\n 'username': user,\n 'password': self.getAccountData(user)['password']})\n\n\n def loadAccountInfo(self, user, req):\n premium = False\n validuntil = None\n trafficleft = -1\n maxtraffic = 100 * 1024 * 1024 * 1024 #: 100 GB\n\n api = {}\n for line in self.api_response(user, req).splitlines():\n if \"=\" in line:\n key, value = line.split(\"=\")\n api[key] = value\n\n self.logDebug(api)\n\n if api['a'].lower() != \"not_available\":\n req.cj.setCookie(\"share-online.biz\", 'a', api['a'])\n\n premium = api['group'] == \"Premium\"\n\n validuntil = float(api['expire_date'])\n\n traffic = float(api['traffic_1d'].split(\";\")[0])\n maxtraffic = max(maxtraffic, traffic)\n trafficleft = maxtraffic - traffic\n\n maxtraffic /= 1024 #@TODO: Remove `/ 1024` in 0.4.10\n trafficleft /= 1024 #@TODO: Remove `/ 1024` in 0.4.10\n\n return {'premium': premium, 'validuntil': validuntil, 'trafficleft': trafficleft, 'maxtraffic': maxtraffic}\n\n\n def login(self, user, data, req):\n html = self.api_response(user, req)\n err = re.search(r'\\*\\*(.+?)\\*\\*', html)\n if err:\n self.logError(err.group(1))\n self.wrongPassword()\n", "path": "module/plugins/accounts/ShareonlineBiz.py"}]}
1,357
212
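The substance of this fix is a single comparison inside `loadAccountInfo`; a minimal sketch of the changed fragment is shown below (the only other change in the golden diff is the plugin version bump to 0.31).

```python
# Inside ShareonlineBiz.loadAccountInfo(), after the 'a' cookie is set.
# Previously only the "Premium" group was accepted, so PrePaid accounts
# were treated as free and pyload asked for captchas.
premium = api['group'] in ["Premium", "PrePaid"]
```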
gh_patches_debug_21992
rasdani/github-patches
git_diff
akvo__akvo-rsr-3685
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Allow patching indicator with empty list of dimension names </issue> <code> [start of akvo/rest/serializers/indicator.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 from akvo.rest.serializers.indicator_period import IndicatorPeriodFrameworkSerializer 8 from akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer 9 from akvo.rest.serializers.rsr_serializer import BaseRSRSerializer 10 from akvo.rsr.models import Indicator 11 12 from rest_framework import serializers 13 14 15 class IndicatorSerializer(BaseRSRSerializer): 16 17 result_unicode = serializers.ReadOnlyField(source='result.__unicode__') 18 measure_label = serializers.ReadOnlyField(source='iati_measure_unicode') 19 children_aggregate_percentage = serializers.ReadOnlyField() 20 21 class Meta: 22 model = Indicator 23 fields = '__all__' 24 25 # TODO: add validation for parent_indicator 26 27 28 class IndicatorFrameworkSerializer(BaseRSRSerializer): 29 30 periods = IndicatorPeriodFrameworkSerializer(many=True, required=False, read_only=True) 31 parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id') 32 children_aggregate_percentage = serializers.ReadOnlyField() 33 dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True) 34 35 class Meta: 36 model = Indicator 37 fields = '__all__' 38 [end of akvo/rest/serializers/indicator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/serializers/indicator.py b/akvo/rest/serializers/indicator.py --- a/akvo/rest/serializers/indicator.py +++ b/akvo/rest/serializers/indicator.py @@ -7,7 +7,7 @@ from akvo.rest.serializers.indicator_period import IndicatorPeriodFrameworkSerializer from akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer from akvo.rest.serializers.rsr_serializer import BaseRSRSerializer -from akvo.rsr.models import Indicator +from akvo.rsr.models import Indicator, IndicatorDimensionName from rest_framework import serializers @@ -17,6 +17,8 @@ result_unicode = serializers.ReadOnlyField(source='result.__unicode__') measure_label = serializers.ReadOnlyField(source='iati_measure_unicode') children_aggregate_percentage = serializers.ReadOnlyField() + dimension_names = serializers.PrimaryKeyRelatedField( + many=True, queryset=IndicatorDimensionName.objects.all()) class Meta: model = Indicator
{"golden_diff": "diff --git a/akvo/rest/serializers/indicator.py b/akvo/rest/serializers/indicator.py\n--- a/akvo/rest/serializers/indicator.py\n+++ b/akvo/rest/serializers/indicator.py\n@@ -7,7 +7,7 @@\n from akvo.rest.serializers.indicator_period import IndicatorPeriodFrameworkSerializer\n from akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer\n from akvo.rest.serializers.rsr_serializer import BaseRSRSerializer\n-from akvo.rsr.models import Indicator\n+from akvo.rsr.models import Indicator, IndicatorDimensionName\n \n from rest_framework import serializers\n \n@@ -17,6 +17,8 @@\n result_unicode = serializers.ReadOnlyField(source='result.__unicode__')\n measure_label = serializers.ReadOnlyField(source='iati_measure_unicode')\n children_aggregate_percentage = serializers.ReadOnlyField()\n+ dimension_names = serializers.PrimaryKeyRelatedField(\n+ many=True, queryset=IndicatorDimensionName.objects.all())\n \n class Meta:\n model = Indicator\n", "issue": "Allow patching indicator with empty list of dimension names\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom akvo.rest.serializers.indicator_period import IndicatorPeriodFrameworkSerializer\nfrom akvo.rest.serializers.indicator_dimension_name import IndicatorDimensionNameSerializer\nfrom akvo.rest.serializers.rsr_serializer import BaseRSRSerializer\nfrom akvo.rsr.models import Indicator\n\nfrom rest_framework import serializers\n\n\nclass IndicatorSerializer(BaseRSRSerializer):\n\n result_unicode = serializers.ReadOnlyField(source='result.__unicode__')\n measure_label = serializers.ReadOnlyField(source='iati_measure_unicode')\n children_aggregate_percentage = serializers.ReadOnlyField()\n\n class Meta:\n model = Indicator\n fields = '__all__'\n\n # TODO: add validation for parent_indicator\n\n\nclass IndicatorFrameworkSerializer(BaseRSRSerializer):\n\n periods = IndicatorPeriodFrameworkSerializer(many=True, required=False, read_only=True)\n parent_indicator = serializers.ReadOnlyField(source='parent_indicator_id')\n children_aggregate_percentage = serializers.ReadOnlyField()\n dimension_names = IndicatorDimensionNameSerializer(many=True, required=False, read_only=True)\n\n class Meta:\n model = Indicator\n fields = '__all__'\n", "path": "akvo/rest/serializers/indicator.py"}]}
919
221
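A sketch of the patched `IndicatorSerializer` follows; the writable `PrimaryKeyRelatedField` is what lets PATCH requests set `dimension_names` to an empty list, while `IndicatorFrameworkSerializer` keeps its read-only nested serializer. `BaseRSRSerializer` is imported exactly as in the original module.

```python
from akvo.rsr.models import Indicator, IndicatorDimensionName
from rest_framework import serializers


class IndicatorSerializer(BaseRSRSerializer):

    result_unicode = serializers.ReadOnlyField(source='result.__unicode__')
    measure_label = serializers.ReadOnlyField(source='iati_measure_unicode')
    children_aggregate_percentage = serializers.ReadOnlyField()
    # Writable by primary key, so an empty list is a valid PATCH payload.
    dimension_names = serializers.PrimaryKeyRelatedField(
        many=True, queryset=IndicatorDimensionName.objects.all())

    class Meta:
        model = Indicator
        fields = '__all__'
```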
gh_patches_debug_29786
rasdani/github-patches
git_diff
iterative__dvc-2017
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dvc: unify requirements files and setup.py We have duplication between `requirements.txt` and `setup.py` `install_requires`. We also use three `pip install` lines in docs to set things up. Ideally we would just say: ```bash pip install -e .[tests] # or pip install -e .[all,tests] # for all remotes ``` So this contains several parts: - [ ] include `requirements.txt` in test requirements with `-r` syntax, - [ ] parse requirements files in `setup.py` or drop requirement files - [ ] update contributing docs </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 from setuptools.command.build_py import build_py as _build_py 3 import os 4 5 6 # https://packaging.python.org/guides/single-sourcing-package-version/ 7 pkg_dir = os.path.dirname(__file__) 8 9 # This will define __version__ implicitly 10 with open(os.path.join(pkg_dir, "dvc", "version.py")) as fobj: 11 exec(fobj.read()) 12 13 version = __version__ # noqa: F821 14 15 16 # To achieve consistency between the build version and the one provided 17 # by your package during runtime, you need to **pin** the build version. 18 # 19 # This custom class will replace the version.py module with a **static** 20 # `__version__` that your package can read at runtime, assuring consistancy. 21 # 22 # References: 23 # - https://docs.python.org/3.7/distutils/extending.html 24 # - https://github.com/python/mypy 25 class build_py(_build_py): 26 def pin_version(self): 27 path = os.path.join(self.build_lib, "dvc") 28 self.mkpath(path) 29 with open(os.path.join(path, "version.py"), "w") as fobj: 30 fobj.write("# AUTOGENERATED at build time by setup.py\n") 31 fobj.write('__version__ = "{}"\n'.format(version)) 32 33 def run(self): 34 self.execute(self.pin_version, ()) 35 _build_py.run(self) 36 37 38 install_requires = [ 39 "ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945 40 "configparser>=3.5.0", 41 "zc.lockfile>=1.2.1", 42 "future>=0.16.0", 43 "colorama>=0.3.9", 44 "configobj>=5.0.6", 45 "networkx>=2.1", 46 "gitpython>=2.1.8", 47 "setuptools>=34.0.0", 48 "nanotime>=0.5.2", 49 "pyasn1>=0.4.1", 50 "schema>=0.6.7", 51 "jsonpath-ng>=1.4.3", 52 "requests>=2.22.0", 53 "grandalf==0.6", 54 "asciimatics>=1.10.0", 55 "distro>=1.3.0", 56 "appdirs>=1.4.3", 57 "treelib>=1.5.5", 58 "inflect>=2.1.0", 59 "humanize>=0.5.1", 60 "dulwich>=0.19.11", 61 "ruamel.yaml>=0.15.91", 62 ] 63 64 # Extra dependencies for remote integrations 65 gs = ["google-cloud-storage==1.13.0"] 66 s3 = ["boto3==1.9.115"] 67 azure = ["azure-storage-blob==1.3.0"] 68 oss = ["oss2==2.6.1"] 69 ssh = ["paramiko>=2.4.1"] 70 all_remotes = gs + s3 + azure + ssh + oss 71 72 setup( 73 name="dvc", 74 version=version, 75 description="Git for data scientists - manage your code and data together", 76 long_description=open("README.rst", "r").read(), 77 author="Dmitry Petrov", 78 author_email="[email protected]", 79 download_url="https://github.com/iterative/dvc", 80 license="Apache License 2.0", 81 install_requires=install_requires, 82 extras_require={ 83 "all": all_remotes, 84 "gs": gs, 85 "s3": s3, 86 "azure": azure, 87 "oss": oss, 88 "ssh": ssh, 89 # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1 90 ':python_version=="2.7"': ["futures", "pathlib2"], 91 }, 92 keywords="data science, data version control, machine learning", 93 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", 94 classifiers=[ 95 "Development Status :: 4 - 
Beta", 96 "Programming Language :: Python :: 2", 97 "Programming Language :: Python :: 2.7", 98 "Programming Language :: Python :: 3", 99 "Programming Language :: Python :: 3.5", 100 "Programming Language :: Python :: 3.6", 101 "Programming Language :: Python :: 3.7", 102 ], 103 packages=find_packages(exclude=["tests"]), 104 include_package_data=True, 105 url="http://dataversioncontrol.com", 106 entry_points={"console_scripts": ["dvc = dvc.main:main"]}, 107 cmdclass={"build_py": build_py}, 108 zip_safe=False, 109 ) 110 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,6 +1,7 @@ from setuptools import setup, find_packages from setuptools.command.build_py import build_py as _build_py import os +import sys # https://packaging.python.org/guides/single-sourcing-package-version/ @@ -69,6 +70,34 @@ ssh = ["paramiko>=2.4.1"] all_remotes = gs + s3 + azure + ssh + oss +# Extra dependecies to run tests +tests_requirements = [ + "PyInstaller==3.4", + "wheel>=0.31.1", + "pydot>=1.2.4", + # Test requirements: + "pytest>=4.4.0", + "pytest-timeout>=1.3.3", + "pytest-cov>=2.6.1", + "pytest-xdist>=1.26.1", + "pytest-mock>=1.10.4", + "flaky>=3.5.3", + "mock>=3.0.0", + "xmltodict>=0.11.0", + "awscli>=1.16.125", + "google-compute-engine", + "pywin32; sys_platform == 'win32'", + "Pygments", # required by collective.checkdocs, + "collective.checkdocs", + "flake8", + "flake8-docstrings", + "jaraco.windows==3.9.2", + "mock-ssh-server>=0.5.0", +] + +if (sys.version_info) >= (3, 6): + tests_requirements.append("black==19.3b0") + setup( name="dvc", version=version, @@ -87,7 +116,8 @@ "oss": oss, "ssh": ssh, # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1 - ':python_version=="2.7"': ["futures", "pathlib2"], + ":python_version=='2.7'": ["futures", "pathlib2"], + "tests": tests_requirements, }, keywords="data science, data version control, machine learning", python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,7 @@\n from setuptools import setup, find_packages\n from setuptools.command.build_py import build_py as _build_py\n import os\n+import sys\n \n \n # https://packaging.python.org/guides/single-sourcing-package-version/\n@@ -69,6 +70,34 @@\n ssh = [\"paramiko>=2.4.1\"]\n all_remotes = gs + s3 + azure + ssh + oss\n \n+# Extra dependecies to run tests\n+tests_requirements = [\n+ \"PyInstaller==3.4\",\n+ \"wheel>=0.31.1\",\n+ \"pydot>=1.2.4\",\n+ # Test requirements:\n+ \"pytest>=4.4.0\",\n+ \"pytest-timeout>=1.3.3\",\n+ \"pytest-cov>=2.6.1\",\n+ \"pytest-xdist>=1.26.1\",\n+ \"pytest-mock>=1.10.4\",\n+ \"flaky>=3.5.3\",\n+ \"mock>=3.0.0\",\n+ \"xmltodict>=0.11.0\",\n+ \"awscli>=1.16.125\",\n+ \"google-compute-engine\",\n+ \"pywin32; sys_platform == 'win32'\",\n+ \"Pygments\", # required by collective.checkdocs,\n+ \"collective.checkdocs\",\n+ \"flake8\",\n+ \"flake8-docstrings\",\n+ \"jaraco.windows==3.9.2\",\n+ \"mock-ssh-server>=0.5.0\",\n+]\n+\n+if (sys.version_info) >= (3, 6):\n+ tests_requirements.append(\"black==19.3b0\")\n+\n setup(\n name=\"dvc\",\n version=version,\n@@ -87,7 +116,8 @@\n \"oss\": oss,\n \"ssh\": ssh,\n # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n- ':python_version==\"2.7\"': [\"futures\", \"pathlib2\"],\n+ \":python_version=='2.7'\": [\"futures\", \"pathlib2\"],\n+ \"tests\": tests_requirements,\n },\n keywords=\"data science, data version control, machine learning\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n", "issue": "dvc: unify requirements files and setup.py\nWe have duplication between `requirements.txt` and `setup.py` `install_requires`. We also use three `pip install` lines in docs to set things up. 
Ideally we would just say:\r\n```bash\r\npip install -e .[tests]\r\n# or\r\npip install -e .[all,tests] # for all remotes\r\n```\r\nSo this contains several parts:\r\n- [ ] include `requirements.txt` in test requirements with `-r` syntax,\r\n- [ ] parse requirements files in `setup.py` or drop requirement files \r\n- [ ] update contributing docs\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py as _build_py\nimport os\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\npkg_dir = os.path.dirname(__file__)\n\n# This will define __version__ implicitly\nwith open(os.path.join(pkg_dir, \"dvc\", \"version.py\")) as fobj:\n exec(fobj.read())\n\nversion = __version__ # noqa: F821\n\n\n# To achieve consistency between the build version and the one provided\n# by your package during runtime, you need to **pin** the build version.\n#\n# This custom class will replace the version.py module with a **static**\n# `__version__` that your package can read at runtime, assuring consistancy.\n#\n# References:\n# - https://docs.python.org/3.7/distutils/extending.html\n# - https://github.com/python/mypy\nclass build_py(_build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"dvc\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as fobj:\n fobj.write(\"# AUTOGENERATED at build time by setup.py\\n\")\n fobj.write('__version__ = \"{}\"\\n'.format(version))\n\n def run(self):\n self.execute(self.pin_version, ())\n _build_py.run(self)\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"zc.lockfile>=1.2.1\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"networkx>=2.1\",\n \"gitpython>=2.1.8\",\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-ng>=1.4.3\",\n \"requests>=2.22.0\",\n \"grandalf==0.6\",\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n \"treelib>=1.5.5\",\n \"inflect>=2.1.0\",\n \"humanize>=0.5.1\",\n \"dulwich>=0.19.11\",\n \"ruamel.yaml>=0.15.91\",\n]\n\n# Extra dependencies for remote integrations\ngs = [\"google-cloud-storage==1.13.0\"]\ns3 = [\"boto3==1.9.115\"]\nazure = [\"azure-storage-blob==1.3.0\"]\noss = [\"oss2==2.6.1\"]\nssh = [\"paramiko>=2.4.1\"]\nall_remotes = gs + s3 + azure + ssh + oss\n\nsetup(\n name=\"dvc\",\n version=version,\n description=\"Git for data scientists - manage your code and data together\",\n long_description=open(\"README.rst\", \"r\").read(),\n author=\"Dmitry Petrov\",\n author_email=\"[email protected]\",\n download_url=\"https://github.com/iterative/dvc\",\n license=\"Apache License 2.0\",\n install_requires=install_requires,\n extras_require={\n \"all\": all_remotes,\n \"gs\": gs,\n \"s3\": s3,\n \"azure\": azure,\n \"oss\": oss,\n \"ssh\": ssh,\n # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n ':python_version==\"2.7\"': [\"futures\", \"pathlib2\"],\n },\n keywords=\"data science, data version control, machine learning\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n 
],\n packages=find_packages(exclude=[\"tests\"]),\n include_package_data=True,\n url=\"http://dataversioncontrol.com\",\n entry_points={\"console_scripts\": [\"dvc = dvc.main:main\"]},\n cmdclass={\"build_py\": build_py},\n zip_safe=False,\n)\n", "path": "setup.py"}]}
1,933
562
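In effect this patch folds the test dependencies into an extra named `tests`, so a development environment can be set up with a single `pip install -e .[tests]` (or `.[all,tests]` for every remote). The sketch below abridges the new block from the golden diff: the long requirement list is shortened, and names such as `all_remotes` and `install_requires` are the ones already defined in the original setup.py.

```python
import sys

# Extra dependencies to run tests (abridged; see the golden diff for the full list)
tests_requirements = [
    "pytest>=4.4.0",
    "pytest-cov>=2.6.1",
    "pytest-xdist>=1.26.1",
    "mock>=3.0.0",
    # ...
]

if sys.version_info >= (3, 6):
    tests_requirements.append("black==19.3b0")

setup(
    # ... unchanged arguments from the original setup() call ...
    install_requires=install_requires,
    extras_require={
        "all": all_remotes,
        "tests": tests_requirements,  # enables `pip install -e .[tests]`
        # remaining extras (gs, s3, azure, oss, ssh, python_version marker) as before
    },
)
```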
gh_patches_debug_120
rasdani/github-patches
git_diff
pytorch__TensorRT-1896
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Upgrade `release/1.4` to Torch 2.0.1 + TensorRT 8.6.1 - Also upgrade `main` to TensorRT 8.6.1 (as a commit to #1852) </issue> <code> [start of py/versions.py] 1 __version__ = "1.4.0.rc0" 2 __cuda_version__ = "11.8" 3 __cudnn_version__ = "8.8" 4 __tensorrt_version__ = "8.6" 5 [end of py/versions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/py/versions.py b/py/versions.py --- a/py/versions.py +++ b/py/versions.py @@ -1,4 +1,4 @@ -__version__ = "1.4.0.rc0" +__version__ = "1.4.0" __cuda_version__ = "11.8" __cudnn_version__ = "8.8" __tensorrt_version__ = "8.6"
{"golden_diff": "diff --git a/py/versions.py b/py/versions.py\n--- a/py/versions.py\n+++ b/py/versions.py\n@@ -1,4 +1,4 @@\n-__version__ = \"1.4.0.rc0\"\n+__version__ = \"1.4.0\"\n __cuda_version__ = \"11.8\"\n __cudnn_version__ = \"8.8\"\n __tensorrt_version__ = \"8.6\"\n", "issue": "Upgrade `release/1.4` to Torch 2.0.1 + TensorRT 8.6.1\n- Also upgrade `main` to TensorRT 8.6.1 (as a commit to #1852)\n", "before_files": [{"content": "__version__ = \"1.4.0.rc0\"\n__cuda_version__ = \"11.8\"\n__cudnn_version__ = \"8.8\"\n__tensorrt_version__ = \"8.6\"\n", "path": "py/versions.py"}]}
638
99
gh_patches_debug_23202
rasdani/github-patches
git_diff
bridgecrewio__checkov-616
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Check: CKV_GCP_19 False positive **Describe the bug** Checkov Will flag your code even if the `basic-auth` is already disabled on your cluster. **To Reproduce** Steps to reproduce the behavior: 1. Have a file as follows: ``` resource "google_container_cluster" "cluster-test" { name = "cluster-test" location = "europe-west1-c" provider = google-beta remove_default_node_pool = true initial_node_count = 1 enable_shielded_nodes = true release_channel { channel = "RAPID" } pod_security_policy_config { enabled = true } master_auth { username = "" password = "" client_certificate_config { issue_client_certificate = false } } } ``` 2. Run cli command 'checkov -d path/to/your/terraform/folder.' 3. See error: ``` Check: CKV_GCP_19: "Ensure GKE basic auth is disabled" FAILED for resource: google_container_cluster.cluster-test File: /cluster.tf:1-27 Guide: https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_11 1 | resource "google_container_cluster" "cluster-test" { 2 | name = "cluster-test" 3 | location = "europe-west1-c" 4 | provider = google-beta 5 | 6 | remove_default_node_pool = true 7 | initial_node_count = 1 8 | 9 | enable_shielded_nodes = true 10 | 11 | release_channel { 12 | channel = "RAPID" 13 | } 14 | 15 | pod_security_policy_config { 16 | enabled = true 17 | } 18 | 19 | master_auth { 20 | username = "" 21 | password = "" 22 | 23 | client_certificate_config { 24 | issue_client_certificate = false 25 | } 26 | } 27 | } ``` **Expected behavior** ``` Check: CKV_GCP_19: "Ensure GKE basic auth is disabled" PASSED for resource: google_container_cluster.cluster-test File: /cluster.tf:1-27 Guide: https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_7 ``` The `basic-auth` is already supposed to be disabled using this bit of code as: ``` master_auth { username = "" password = "" client_certificate_config { issue_client_certificate = false } } ``` **Environment:** - CI: Github Actions - OS: Ubuntu-lastest - Checkov Version [latest] - Terraform v0.12.24 **Additional context** Tested and installed today following documentation available here: https://www.checkov.io/1.Introduction/Getting%20Started.html </issue> <code> [start of checkov/terraform/checks/resource/gcp/GKEBasicAuth.py] 1 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 2 from checkov.common.models.enums import CheckResult, CheckCategories 3 4 5 class GKEBasicAuth(BaseResourceCheck): 6 def __init__(self): 7 name = "Ensure GKE basic auth is disabled" 8 id = "CKV_GCP_19" 9 supported_resources = ['google_container_cluster'] 10 categories = [CheckCategories.KUBERNETES] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def scan_resource_conf(self, conf): 14 """ 15 Looks for password configuration at azure_instance: 16 https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html 17 :param conf: google_compute_ssl_policy configuration 18 :return: <CheckResult> 19 """ 20 if 'master_auth' in conf.keys(): 21 if conf['master_auth'][0].get('username') or conf['master_auth'][0].get('password'): 22 return CheckResult.FAILED 23 return CheckResult.PASSED 24 return CheckResult.FAILED 25 26 27 check = GKEBasicAuth() 28 [end of checkov/terraform/checks/resource/gcp/GKEBasicAuth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py --- a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py +++ b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py @@ -1,5 +1,5 @@ -from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck from checkov.common.models.enums import CheckResult, CheckCategories +from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck class GKEBasicAuth(BaseResourceCheck): @@ -18,7 +18,13 @@ :return: <CheckResult> """ if 'master_auth' in conf.keys(): - if conf['master_auth'][0].get('username') or conf['master_auth'][0].get('password'): + username = conf['master_auth'][0].get('username') + password = conf['master_auth'][0].get('password') + if username or password: + # only if both are set to the empty string it is fine + # https://www.terraform.io/docs/providers/google/r/container_cluster.html + if len(username) == 1 and len(password) == 1 and username[0] == '' and password[0] == '': + return CheckResult.PASSED return CheckResult.FAILED return CheckResult.PASSED return CheckResult.FAILED
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n--- a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n+++ b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n@@ -1,5 +1,5 @@\n-from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n from checkov.common.models.enums import CheckResult, CheckCategories\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n \n class GKEBasicAuth(BaseResourceCheck):\n@@ -18,7 +18,13 @@\n :return: <CheckResult>\n \"\"\"\n if 'master_auth' in conf.keys():\n- if conf['master_auth'][0].get('username') or conf['master_auth'][0].get('password'):\n+ username = conf['master_auth'][0].get('username')\n+ password = conf['master_auth'][0].get('password')\n+ if username or password:\n+ # only if both are set to the empty string it is fine\n+ # https://www.terraform.io/docs/providers/google/r/container_cluster.html\n+ if len(username) == 1 and len(password) == 1 and username[0] == '' and password[0] == '':\n+ return CheckResult.PASSED\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n", "issue": "Check: CKV_GCP_19 False positive\n**Describe the bug**\r\nCheckov Will flag your code even if the `basic-auth` is already disabled on your cluster.\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Have a file as follows:\r\n``` \r\nresource \"google_container_cluster\" \"cluster-test\" {\r\n name = \"cluster-test\"\r\n location = \"europe-west1-c\"\r\n provider = google-beta\r\n\r\n remove_default_node_pool = true\r\n initial_node_count = 1\r\n\r\n enable_shielded_nodes = true\r\n\r\n release_channel {\r\n channel = \"RAPID\"\r\n }\r\n\r\n pod_security_policy_config {\r\n enabled = true\r\n }\r\n\r\n master_auth {\r\n username = \"\"\r\n password = \"\"\r\n\r\n client_certificate_config {\r\n issue_client_certificate = false\r\n }\r\n }\r\n}\r\n``` \r\n2. Run cli command 'checkov -d path/to/your/terraform/folder.'\r\n3. 
See error:\r\n``` \r\nCheck: CKV_GCP_19: \"Ensure GKE basic auth is disabled\"\r\n\tFAILED for resource: google_container_cluster.cluster-test\r\n\tFile: /cluster.tf:1-27\r\n\tGuide: https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_11\r\n\r\n\t\t1 | resource \"google_container_cluster\" \"cluster-test\" {\r\n\t\t2 | name = \"cluster-test\"\r\n\t\t3 | location = \"europe-west1-c\"\r\n\t\t4 | provider = google-beta\r\n\t\t5 | \r\n\t\t6 | remove_default_node_pool = true\r\n\t\t7 | initial_node_count = 1\r\n\t\t8 | \r\n\t\t9 | enable_shielded_nodes = true\r\n\t\t10 | \r\n\t\t11 | release_channel {\r\n\t\t12 | channel = \"RAPID\"\r\n\t\t13 | }\r\n\t\t14 | \r\n\t\t15 | pod_security_policy_config {\r\n\t\t16 | enabled = true\r\n\t\t17 | }\r\n\t\t18 | \r\n\t\t19 | master_auth {\r\n\t\t20 | username = \"\"\r\n\t\t21 | password = \"\"\r\n\t\t22 | \r\n\t\t23 | client_certificate_config {\r\n\t\t24 | issue_client_certificate = false\r\n\t\t25 | }\r\n\t\t26 | }\r\n\t\t27 | }\r\n``` \r\n\r\n**Expected behavior**\r\n``` \r\nCheck: CKV_GCP_19: \"Ensure GKE basic auth is disabled\"\r\n\tPASSED for resource: google_container_cluster.cluster-test\r\n\tFile: /cluster.tf:1-27\r\n\tGuide: https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_7\r\n``` \r\n\r\nThe `basic-auth` is already supposed to be disabled using this bit of code as:\r\n``` \r\n master_auth {\r\n username = \"\"\r\n password = \"\"\r\n\r\n client_certificate_config {\r\n issue_client_certificate = false\r\n }\r\n }\r\n```\r\n\r\n**Environment:**\r\n - CI: Github Actions \r\n - OS: Ubuntu-lastest\r\n - Checkov Version [latest]\r\n - Terraform v0.12.24\r\n\r\n**Additional context**\r\nTested and installed today following documentation available here: https://www.checkov.io/1.Introduction/Getting%20Started.html\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass GKEBasicAuth(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure GKE basic auth is disabled\"\n id = \"CKV_GCP_19\"\n supported_resources = ['google_container_cluster']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for password configuration at azure_instance:\n https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html\n :param conf: google_compute_ssl_policy configuration\n :return: <CheckResult>\n \"\"\"\n if 'master_auth' in conf.keys():\n if conf['master_auth'][0].get('username') or conf['master_auth'][0].get('password'):\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = GKEBasicAuth()\n", "path": "checkov/terraform/checks/resource/gcp/GKEBasicAuth.py"}]}
1,554
327
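For clarity, `scan_resource_conf` as it reads after the patch is reproduced below; the only behavioural change is the explicit allowance for `username` and `password` both being the empty string, which is exactly the configuration shown in the report above.

```python
def scan_resource_conf(self, conf):
    if 'master_auth' in conf.keys():
        username = conf['master_auth'][0].get('username')
        password = conf['master_auth'][0].get('password')
        if username or password:
            # only if both are set to the empty string it is fine
            # https://www.terraform.io/docs/providers/google/r/container_cluster.html
            if len(username) == 1 and len(password) == 1 and username[0] == '' and password[0] == '':
                return CheckResult.PASSED
            return CheckResult.FAILED
        return CheckResult.PASSED
    return CheckResult.FAILED
```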
gh_patches_debug_35697
rasdani/github-patches
git_diff
bridgecrewio__checkov-2501
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AWS_103 - Mentioned Checkov Checks producing an Exception, when the Listener protocol is HTTP (ALB). Background of the Issue : We have been using mentioned Checkov inbuilt checks/policy ID (CKV_AWS_103/ BC_AWS_GENERAL_43) for connection requests in AWS Load balancer. it was working fine when application team using the Module, "aws_lb_listener" "redirect" will only enable when application team users "HTTPS" Protocol. The particular issue is being presented when we attempt "HTTP" Protocol (Development Environment) Sample Module snippet (AWS Load Balancer Listener ) (Re-direct Action) :- resource "aws_lb_listener" "redirect_http_listeneter" { load_balancer_arn = aws_lb.front_end.arn port = "80" protocol = "HTTP" } default action { type = "redirect" redirect { port = "443" } } } ISSUE :- Exception Occur :- [If application team used the protocol HTTP] Exception message sample "[MainThread][ERROR] Failed to run check: Ensure the load balancer is using TLS1.2 for configuration:{'alpn_policy':[None],'arn':['arn:aws:elasticloadbalancing:**********:listener/app/dev-opsmanager-ec2-alb/******/*****],'certicate_arn':[None], 'default_action': In bridgecrewio github, [ checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py ] supports only HTTPS and it not producing any proper results, if we application team using HTTP. So require your support to tweak globally in the in-built check ID. checkov version : 2.0.704 kindly let me know any other additional inputs require from our end. </issue> <code> [start of checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py] 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.common.util.type_forcers import force_list 3 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 4 5 6 class AppLoadBalancerTLS12(BaseResourceCheck): 7 def __init__(self): 8 name = "Ensure that load balancer is using TLS 1.2" 9 id = "CKV_AWS_103" 10 supported_resources = ["aws_lb_listener", "aws_alb_listener"] 11 categories = [CheckCategories.GENERAL_SECURITY] 12 super().__init__( 13 name=name, 14 id=id, 15 categories=categories, 16 supported_resources=supported_resources, 17 ) 18 19 def scan_resource_conf(self, conf): 20 key = "protocol" 21 self.evaluated_keys = [key] 22 if key in conf.keys(): 23 if conf[key] in (["HTTPS"], ["TLS"]): 24 # Only interested in HTTPS & TLS listeners 25 policy = "ssl_policy" 26 if policy in conf.keys(): 27 self.evaluated_keys.append(policy) 28 name = str(conf[policy]).strip("['']") 29 if name.startswith("ELBSecurityPolicy-FS-1-2") or name.startswith("ELBSecurityPolicy-TLS-1-2"): 30 return CheckResult.PASSED 31 return CheckResult.FAILED 32 elif conf[key] in (["TCP"], ["UDP"], ["TCP_UDP"]): 33 return CheckResult.PASSED 34 for idx_action, action in enumerate(conf.get("default_action", [])): 35 redirects = action.get("redirect", []) 36 for idx_redirect, redirect in enumerate(force_list(redirects)): 37 if redirect.get("protocol", []) == ["HTTPS"]: 38 redirect_index = f"[{idx_redirect}]/" if isinstance(redirects, list) else "" 39 self.evaluated_keys.append(f'default_action/[{idx_action}]/redirect/{redirect_index}protocol') 40 return CheckResult.PASSED 41 return CheckResult.FAILED 42 43 44 check = AppLoadBalancerTLS12() 45 [end of checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py] </code> I need you to solve this issue by generating a single patch file that I can apply 
directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py --- a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py +++ b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py @@ -1,14 +1,16 @@ +from typing import Dict, List, Any + from checkov.common.models.enums import CheckResult, CheckCategories from checkov.common.util.type_forcers import force_list from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck class AppLoadBalancerTLS12(BaseResourceCheck): - def __init__(self): + def __init__(self) -> None: name = "Ensure that load balancer is using TLS 1.2" id = "CKV_AWS_103" - supported_resources = ["aws_lb_listener", "aws_alb_listener"] - categories = [CheckCategories.GENERAL_SECURITY] + supported_resources = ("aws_lb_listener", "aws_alb_listener") + categories = (CheckCategories.GENERAL_SECURITY,) super().__init__( name=name, id=id, @@ -16,7 +18,7 @@ supported_resources=supported_resources, ) - def scan_resource_conf(self, conf): + def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult: key = "protocol" self.evaluated_keys = [key] if key in conf.keys(): @@ -34,7 +36,7 @@ for idx_action, action in enumerate(conf.get("default_action", [])): redirects = action.get("redirect", []) for idx_redirect, redirect in enumerate(force_list(redirects)): - if redirect.get("protocol", []) == ["HTTPS"]: + if isinstance(redirect, dict) and redirect.get("protocol", []) == ["HTTPS"]: redirect_index = f"[{idx_redirect}]/" if isinstance(redirects, list) else "" self.evaluated_keys.append(f'default_action/[{idx_action}]/redirect/{redirect_index}protocol') return CheckResult.PASSED
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n--- a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n+++ b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n@@ -1,14 +1,16 @@\n+from typing import Dict, List, Any\n+\n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.common.util.type_forcers import force_list\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n \n class AppLoadBalancerTLS12(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure that load balancer is using TLS 1.2\"\n id = \"CKV_AWS_103\"\n- supported_resources = [\"aws_lb_listener\", \"aws_alb_listener\"]\n- categories = [CheckCategories.GENERAL_SECURITY]\n+ supported_resources = (\"aws_lb_listener\", \"aws_alb_listener\")\n+ categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(\n name=name,\n id=id,\n@@ -16,7 +18,7 @@\n supported_resources=supported_resources,\n )\n \n- def scan_resource_conf(self, conf):\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n key = \"protocol\"\n self.evaluated_keys = [key]\n if key in conf.keys():\n@@ -34,7 +36,7 @@\n for idx_action, action in enumerate(conf.get(\"default_action\", [])):\n redirects = action.get(\"redirect\", [])\n for idx_redirect, redirect in enumerate(force_list(redirects)):\n- if redirect.get(\"protocol\", []) == [\"HTTPS\"]:\n+ if isinstance(redirect, dict) and redirect.get(\"protocol\", []) == [\"HTTPS\"]:\n redirect_index = f\"[{idx_redirect}]/\" if isinstance(redirects, list) else \"\"\n self.evaluated_keys.append(f'default_action/[{idx_action}]/redirect/{redirect_index}protocol')\n return CheckResult.PASSED\n", "issue": "CKV_AWS_103 - Mentioned Checkov Checks producing an Exception, when the Listener protocol is HTTP (ALB).\nBackground of the Issue : \r\n\r\nWe have been using mentioned Checkov inbuilt checks/policy ID (CKV_AWS_103/ BC_AWS_GENERAL_43) for connection requests in AWS Load balancer. it was working fine when application team using the Module, \"aws_lb_listener\" \"redirect\" will only enable when application team users \"HTTPS\" Protocol. The particular issue is being presented when we attempt \"HTTP\" Protocol (Development Environment)\r\n\r\nSample Module snippet (AWS Load Balancer Listener ) (Re-direct Action) :- \r\n\r\nresource \"aws_lb_listener\" \"redirect_http_listeneter\" {\r\nload_balancer_arn = aws_lb.front_end.arn\r\nport = \"80\"\r\nprotocol = \"HTTP\" \r\n}\r\n\r\ndefault action {\r\n type = \"redirect\"\r\n\r\nredirect {\r\n port = \"443\"\r\n }\r\n}\r\n}\r\n\r\nISSUE :- \r\nException Occur :- [If application team used the protocol HTTP] \r\n\r\nException message sample \r\n\"[MainThread][ERROR] Failed to run check: Ensure the load balancer is using TLS1.2 for configuration:{'alpn_policy':[None],'arn':['arn:aws:elasticloadbalancing:**********:listener/app/dev-opsmanager-ec2-alb/******/*****],'certicate_arn':[None], 'default_action':\r\n\r\nIn bridgecrewio github, [ checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py ]\r\nsupports only HTTPS and it not producing any proper results, if we application team using HTTP. So require your support to tweak globally in the in-built check ID. 
\r\n\r\ncheckov version : 2.0.704\r\n\r\nkindly let me know any other additional inputs require from our end.\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass AppLoadBalancerTLS12(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that load balancer is using TLS 1.2\"\n id = \"CKV_AWS_103\"\n supported_resources = [\"aws_lb_listener\", \"aws_alb_listener\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(\n name=name,\n id=id,\n categories=categories,\n supported_resources=supported_resources,\n )\n\n def scan_resource_conf(self, conf):\n key = \"protocol\"\n self.evaluated_keys = [key]\n if key in conf.keys():\n if conf[key] in ([\"HTTPS\"], [\"TLS\"]):\n # Only interested in HTTPS & TLS listeners\n policy = \"ssl_policy\"\n if policy in conf.keys():\n self.evaluated_keys.append(policy)\n name = str(conf[policy]).strip(\"['']\")\n if name.startswith(\"ELBSecurityPolicy-FS-1-2\") or name.startswith(\"ELBSecurityPolicy-TLS-1-2\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n elif conf[key] in ([\"TCP\"], [\"UDP\"], [\"TCP_UDP\"]):\n return CheckResult.PASSED\n for idx_action, action in enumerate(conf.get(\"default_action\", [])):\n redirects = action.get(\"redirect\", [])\n for idx_redirect, redirect in enumerate(force_list(redirects)):\n if redirect.get(\"protocol\", []) == [\"HTTPS\"]:\n redirect_index = f\"[{idx_redirect}]/\" if isinstance(redirects, list) else \"\"\n self.evaluated_keys.append(f'default_action/[{idx_action}]/redirect/{redirect_index}protocol')\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = AppLoadBalancerTLS12()\n", "path": "checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py"}]}
1,451
478
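The exception in this issue appears to come from `redirect.get(...)` being called on a value that is not a dict when the listener uses plain HTTP; the patched default-action loop, sketched below, simply type-guards that access. The rest of the check (the HTTPS/TLS `ssl_policy` branch and the TCP/UDP pass-through) is unchanged apart from type annotations, and `force_list` and `CheckResult` are the imports already present in the original file.

```python
for idx_action, action in enumerate(conf.get("default_action", [])):
    redirects = action.get("redirect", [])
    for idx_redirect, redirect in enumerate(force_list(redirects)):
        # Guard against non-dict redirect blocks (the HTTP listener case from the issue).
        if isinstance(redirect, dict) and redirect.get("protocol", []) == ["HTTPS"]:
            redirect_index = f"[{idx_redirect}]/" if isinstance(redirects, list) else ""
            self.evaluated_keys.append(f'default_action/[{idx_action}]/redirect/{redirect_index}protocol')
            return CheckResult.PASSED
return CheckResult.FAILED
```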
gh_patches_debug_612
rasdani/github-patches
git_diff
pex-tool__pex-1502
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.53 On the docket: + [x] pex stops interpreter search if even one intepreter fails to identify itself #1494 + [x] Add support for setting custom venv prompts. #1499 + [x] How to know whether we are running from within pex #1485 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.52" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.52" +__version__ = "2.1.53"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.52\"\n+__version__ = \"2.1.53\"\n", "issue": "Release 2.1.53\nOn the docket:\r\n+ [x] pex stops interpreter search if even one intepreter fails to identify itself #1494\r\n+ [x] Add support for setting custom venv prompts. #1499\r\n+ [x] How to know whether we are running from within pex #1485 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.52\"\n", "path": "pex/version.py"}]}
663
97
gh_patches_debug_25640
rasdani/github-patches
git_diff
pytorch__ignite-410
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug in recursive attach of MetricsLambda As a result of #385, the recursive attach of MetricsLambda is not working as intended. The code below throws this error: `NotComputableError: Accuracy must have at least one example before it can be computed.` ```python import torch from ignite.metrics import Accuracy, Precision, Recall, MetricsLambda from ignite.engine import Engine accuracy_1 = Accuracy() accuracy_2 = Accuracy() mean_accuracy = (accuracy_1 + accuracy_2) / 2 metrics = { # "a1": accuracy_1, # "a2": accuracy_2, "mean accuracy": mean_accuracy, } y_pred = torch.randint(0, 2, size=(15, 10, 4)).float() y = torch.randint(0, 2, size=(15, 10, 4)).long() def update_fn(engine, batch): y_pred, y = batch return y_pred, y validator = Engine(update_fn) for name, metric in metrics.items(): metric.attach(validator, name) def data(y_pred, y): for i in range(y_pred.shape[0]): yield (y_pred[i], y[i]) d = data(y_pred, y) state = validator.run(d, max_epochs=1) print(state.metrics) ``` cc @vfdev-5 @amitibo </issue> <code> [start of ignite/metrics/metrics_lambda.py] 1 from ignite.metrics.metric import Metric 2 from ignite.engine import Events 3 4 5 class MetricsLambda(Metric): 6 """ 7 Apply a function to other metrics to obtain a new metric. 8 The result of the new metric is defined to be the result 9 of applying the function to the result of argument metrics. 10 11 When update, this metric does not recursively update the metrics 12 it depends on. When reset, all its dependency metrics would be 13 resetted. When attach, all its dependencies would be automatically 14 attached. 15 16 Args: 17 f (callable): the function that defines the computation 18 args (sequence): Sequence of other metrics or something 19 else that will be fed to ``f`` as arguments. 20 21 Example: 22 23 .. code-block:: python 24 25 precision = Precision(average=False) 26 recall = Recall(average=False) 27 28 def Fbeta(r, p, beta): 29 return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r + 1e-20)).item() 30 31 F1 = MetricsLambda(Fbeta, recall, precision, 1) 32 F2 = MetricsLambda(Fbeta, recall, precision, 2) 33 F3 = MetricsLambda(Fbeta, recall, precision, 3) 34 F4 = MetricsLambda(Fbeta, recall, precision, 4) 35 """ 36 def __init__(self, f, *args): 37 self.function = f 38 self.args = args 39 super(MetricsLambda, self).__init__() 40 41 def reset(self): 42 for i in self.args: 43 if isinstance(i, Metric): 44 i.reset() 45 46 def update(self, output): 47 # NB: this method does not recursively update dependency metrics, 48 # which might cause duplicate update issue. To update this metric, 49 # users should manually update its dependencies. 
50 pass 51 52 def compute(self): 53 materialized = [i.compute() if isinstance(i, Metric) else i for i in self.args] 54 return self.function(*materialized) 55 56 def attach(self, engine, name): 57 # recursively attach all its dependencies 58 for index, metric in enumerate(self.args): 59 if isinstance(metric, Metric): 60 if not engine.has_event_handler(metric.started, Events.EPOCH_STARTED): 61 engine.add_event_handler(Events.EPOCH_STARTED, metric.started) 62 if not engine.has_event_handler(metric.iteration_completed, Events.ITERATION_COMPLETED): 63 engine.add_event_handler(Events.ITERATION_COMPLETED, metric.iteration_completed) 64 super(MetricsLambda, self).attach(engine, name) 65 [end of ignite/metrics/metrics_lambda.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/metrics/metrics_lambda.py b/ignite/metrics/metrics_lambda.py --- a/ignite/metrics/metrics_lambda.py +++ b/ignite/metrics/metrics_lambda.py @@ -53,12 +53,18 @@ materialized = [i.compute() if isinstance(i, Metric) else i for i in self.args] return self.function(*materialized) - def attach(self, engine, name): - # recursively attach all its dependencies + def _internal_attach(self, engine): for index, metric in enumerate(self.args): - if isinstance(metric, Metric): + if isinstance(metric, MetricsLambda): + metric._internal_attach(engine) + elif isinstance(metric, Metric): if not engine.has_event_handler(metric.started, Events.EPOCH_STARTED): engine.add_event_handler(Events.EPOCH_STARTED, metric.started) if not engine.has_event_handler(metric.iteration_completed, Events.ITERATION_COMPLETED): engine.add_event_handler(Events.ITERATION_COMPLETED, metric.iteration_completed) - super(MetricsLambda, self).attach(engine, name) + + def attach(self, engine, name): + # recursively attach all its dependencies + self._internal_attach(engine) + # attach only handler on EPOCH_COMPLETED + engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)
{"golden_diff": "diff --git a/ignite/metrics/metrics_lambda.py b/ignite/metrics/metrics_lambda.py\n--- a/ignite/metrics/metrics_lambda.py\n+++ b/ignite/metrics/metrics_lambda.py\n@@ -53,12 +53,18 @@\n materialized = [i.compute() if isinstance(i, Metric) else i for i in self.args]\n return self.function(*materialized)\n \n- def attach(self, engine, name):\n- # recursively attach all its dependencies\n+ def _internal_attach(self, engine):\n for index, metric in enumerate(self.args):\n- if isinstance(metric, Metric):\n+ if isinstance(metric, MetricsLambda):\n+ metric._internal_attach(engine)\n+ elif isinstance(metric, Metric):\n if not engine.has_event_handler(metric.started, Events.EPOCH_STARTED):\n engine.add_event_handler(Events.EPOCH_STARTED, metric.started)\n if not engine.has_event_handler(metric.iteration_completed, Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.ITERATION_COMPLETED, metric.iteration_completed)\n- super(MetricsLambda, self).attach(engine, name)\n+\n+ def attach(self, engine, name):\n+ # recursively attach all its dependencies\n+ self._internal_attach(engine)\n+ # attach only handler on EPOCH_COMPLETED\n+ engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)\n", "issue": "Bug in recursive attach of MetricsLambda\nAs a result of #385, the recursive attach of MetricsLambda is not working as intended. \r\n\r\nThe code below throws this error: `NotComputableError: Accuracy must have at least one example before it can be computed.`\r\n\r\n```python\r\nimport torch\r\n\r\nfrom ignite.metrics import Accuracy, Precision, Recall, MetricsLambda\r\nfrom ignite.engine import Engine\r\n\r\naccuracy_1 = Accuracy()\r\naccuracy_2 = Accuracy()\r\nmean_accuracy = (accuracy_1 + accuracy_2) / 2\r\n\r\n\r\nmetrics = {\r\n# \"a1\": accuracy_1,\r\n# \"a2\": accuracy_2, \r\n \"mean accuracy\": mean_accuracy,\r\n}\r\n\r\n\r\ny_pred = torch.randint(0, 2, size=(15, 10, 4)).float()\r\ny = torch.randint(0, 2, size=(15, 10, 4)).long()\r\n\r\ndef update_fn(engine, batch):\r\n y_pred, y = batch\r\n return y_pred, y\r\n\r\nvalidator = Engine(update_fn)\r\n\r\nfor name, metric in metrics.items():\r\n metric.attach(validator, name)\r\n\r\ndef data(y_pred, y):\r\n for i in range(y_pred.shape[0]):\r\n yield (y_pred[i], y[i])\r\n\r\nd = data(y_pred, y)\r\nstate = validator.run(d, max_epochs=1)\r\n\r\nprint(state.metrics)\r\n```\r\n\r\ncc @vfdev-5 @amitibo \n", "before_files": [{"content": "from ignite.metrics.metric import Metric\nfrom ignite.engine import Events\n\n\nclass MetricsLambda(Metric):\n \"\"\"\n Apply a function to other metrics to obtain a new metric.\n The result of the new metric is defined to be the result\n of applying the function to the result of argument metrics.\n\n When update, this metric does not recursively update the metrics\n it depends on. When reset, all its dependency metrics would be\n resetted. When attach, all its dependencies would be automatically\n attached.\n\n Args:\n f (callable): the function that defines the computation\n args (sequence): Sequence of other metrics or something\n else that will be fed to ``f`` as arguments.\n\n Example:\n\n .. 
code-block:: python\n\n precision = Precision(average=False)\n recall = Recall(average=False)\n\n def Fbeta(r, p, beta):\n return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r + 1e-20)).item()\n\n F1 = MetricsLambda(Fbeta, recall, precision, 1)\n F2 = MetricsLambda(Fbeta, recall, precision, 2)\n F3 = MetricsLambda(Fbeta, recall, precision, 3)\n F4 = MetricsLambda(Fbeta, recall, precision, 4)\n \"\"\"\n def __init__(self, f, *args):\n self.function = f\n self.args = args\n super(MetricsLambda, self).__init__()\n\n def reset(self):\n for i in self.args:\n if isinstance(i, Metric):\n i.reset()\n\n def update(self, output):\n # NB: this method does not recursively update dependency metrics,\n # which might cause duplicate update issue. To update this metric,\n # users should manually update its dependencies.\n pass\n\n def compute(self):\n materialized = [i.compute() if isinstance(i, Metric) else i for i in self.args]\n return self.function(*materialized)\n\n def attach(self, engine, name):\n # recursively attach all its dependencies\n for index, metric in enumerate(self.args):\n if isinstance(metric, Metric):\n if not engine.has_event_handler(metric.started, Events.EPOCH_STARTED):\n engine.add_event_handler(Events.EPOCH_STARTED, metric.started)\n if not engine.has_event_handler(metric.iteration_completed, Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.ITERATION_COMPLETED, metric.iteration_completed)\n super(MetricsLambda, self).attach(engine, name)\n", "path": "ignite/metrics/metrics_lambda.py"}]}
num_tokens_prompt: 1,506
num_tokens_diff: 296

problem_id: gh_patches_debug_3602
source: rasdani/github-patches
task_type: git_diff
in_source_id: NVIDIA__apex-184
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Failing optim_wrapper due to missing Scaler argument When creating an optimizer and wrapping it via ``amp_handle.wrap_optimizer(optim)``, the handle [`calls the OptimWrapper`](https://github.com/NVIDIA/apex/blob/master/apex/amp/handle.py#L148), who wraps the optimizer and tries to instantiate a loss scaler per loss. The `OptimWrapper` tries to [instantiate the loss-scaler without an argument](https://github.com/NVIDIA/apex/blob/master/apex/amp/opt.py#L16), but the loss-scaler [needs an argument `loss_scale`](https://github.com/NVIDIA/apex/blob/master/apex/amp/scaler.py#L28), which causes the whole wrapping process to fail. </issue> <code> [start of apex/amp/opt.py] 1 import contextlib 2 import logging 3 import warnings 4 5 from .scaler import LossScaler, master_params 6 7 import numpy as np 8 9 class OptimWrapper(object): 10 def __init__(self, optimizer, amp_handle, num_loss): 11 self._optimizer = optimizer 12 self._amp_handle = amp_handle 13 self._num_loss = num_loss 14 self._loss_idx = 0 15 self._skip_next = [False] * num_loss 16 self._loss_scaler = [LossScaler() for _ in range(num_loss)] 17 18 @contextlib.contextmanager 19 def scale_loss(self, loss): 20 if not self._amp_handle.is_active(): 21 yield loss 22 return 23 24 # When there are multiple losses per-optimizer, we need 25 # to save out current grad accumulation, since we won't be 26 # able to unscale this particulare loss once the grads are 27 # all mixed together. 28 cached_grads = [] 29 if self._loss_idx > 0: 30 for p in master_params(self._optimizer): 31 if p.grad is not None: 32 cached_grads.append(p.grad.data.detach().clone()) 33 else: 34 cached_grads.append(None) 35 self._optimizer.zero_grad() 36 37 loss_scale = self._cur_loss_scaler().loss_scale() 38 yield loss * loss_scale 39 40 self._cur_loss_scaler().clear_overflow_state() 41 self._cur_loss_scaler().unscale( 42 master_params(self._optimizer), 43 master_params(self._optimizer), 44 loss_scale) 45 self._skip_next[self._loss_idx] = self._cur_loss_scaler().update_scale() 46 self._loss_idx += 1 47 48 if len(cached_grads) > 0: 49 for p, cached_grad in zip(master_params(self._optimizer), 50 cached_grads): 51 if cached_grad is not None: 52 p.grad.data.add_(cached_grad) 53 cached_grads = [] 54 55 def _cur_loss_scaler(self): 56 assert 0 <= self._loss_idx < self._num_loss 57 return self._loss_scaler[self._loss_idx] 58 59 def step(self, closure=None): 60 if not self._amp_handle.is_active(): 61 return self._optimizer.step(closure=closure) 62 63 self._loss_idx = 0 64 65 for group in self._optimizer.param_groups: 66 for p in group['params']: 67 self._amp_handle.remove_cache(p) 68 69 if closure is not None: 70 raise NotImplementedError( 71 'The `closure` argument is unsupported by the amp ' + 72 'optimizer wrapper.') 73 if any(self._skip_next): 74 logger = logging.getLogger('apex.amp') 75 logger.info('Gradient overflow, skipping update') 76 self._skip_next = [False] * self._num_loss 77 else: 78 return self._optimizer.step(closure=closure) 79 80 # Forward any attribute lookups 81 def __getattr__(self, attr): 82 return getattr(self._optimizer, attr) 83 84 # Forward all torch.optim.Optimizer methods 85 def __getstate__(self): 86 return self._optimizer.__getstate__() 87 88 def __setstate__(self): 89 return self._optimizer.__setstate__() 90 91 def __repr__(self): 92 return self._optimizer.__repr__() 93 94 def state_dict(self): 95 return self._optimizer.state_dict() 96 97 def 
load_state_dict(self, state_dict): 98 return self._optimizer.load_state_dict(state_dict) 99 100 def zero_grad(self): 101 return self._optimizer.zero_grad() 102 103 def add_param_group(self, param_group): 104 return self._optimizer.add_param_group(param_group) 105 [end of apex/amp/opt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apex/amp/opt.py b/apex/amp/opt.py --- a/apex/amp/opt.py +++ b/apex/amp/opt.py @@ -13,7 +13,7 @@ self._num_loss = num_loss self._loss_idx = 0 self._skip_next = [False] * num_loss - self._loss_scaler = [LossScaler() for _ in range(num_loss)] + self._loss_scaler = [LossScaler('dynamic') for _ in range(num_loss)] @contextlib.contextmanager def scale_loss(self, loss):
{"golden_diff": "diff --git a/apex/amp/opt.py b/apex/amp/opt.py\n--- a/apex/amp/opt.py\n+++ b/apex/amp/opt.py\n@@ -13,7 +13,7 @@\n self._num_loss = num_loss\n self._loss_idx = 0\n self._skip_next = [False] * num_loss\n- self._loss_scaler = [LossScaler() for _ in range(num_loss)]\n+ self._loss_scaler = [LossScaler('dynamic') for _ in range(num_loss)]\n \n @contextlib.contextmanager\n def scale_loss(self, loss):\n", "issue": "Failing optim_wrapper due to missing Scaler argument\nWhen creating an optimizer and wrapping it via ``amp_handle.wrap_optimizer(optim)``, the handle [`calls the OptimWrapper`](https://github.com/NVIDIA/apex/blob/master/apex/amp/handle.py#L148), who wraps the optimizer and tries to instantiate a loss scaler per loss. \r\n\r\nThe `OptimWrapper` tries to [instantiate the loss-scaler without an argument](https://github.com/NVIDIA/apex/blob/master/apex/amp/opt.py#L16), but the loss-scaler [needs an argument `loss_scale`](https://github.com/NVIDIA/apex/blob/master/apex/amp/scaler.py#L28), which causes the whole wrapping process to fail.\n", "before_files": [{"content": "import contextlib\nimport logging\nimport warnings\n\nfrom .scaler import LossScaler, master_params\n\nimport numpy as np\n\nclass OptimWrapper(object):\n def __init__(self, optimizer, amp_handle, num_loss):\n self._optimizer = optimizer\n self._amp_handle = amp_handle\n self._num_loss = num_loss\n self._loss_idx = 0\n self._skip_next = [False] * num_loss\n self._loss_scaler = [LossScaler() for _ in range(num_loss)]\n\n @contextlib.contextmanager\n def scale_loss(self, loss):\n if not self._amp_handle.is_active():\n yield loss\n return\n\n # When there are multiple losses per-optimizer, we need\n # to save out current grad accumulation, since we won't be\n # able to unscale this particulare loss once the grads are\n # all mixed together.\n cached_grads = []\n if self._loss_idx > 0:\n for p in master_params(self._optimizer):\n if p.grad is not None:\n cached_grads.append(p.grad.data.detach().clone())\n else:\n cached_grads.append(None)\n self._optimizer.zero_grad()\n\n loss_scale = self._cur_loss_scaler().loss_scale()\n yield loss * loss_scale\n\n self._cur_loss_scaler().clear_overflow_state()\n self._cur_loss_scaler().unscale(\n master_params(self._optimizer),\n master_params(self._optimizer),\n loss_scale)\n self._skip_next[self._loss_idx] = self._cur_loss_scaler().update_scale()\n self._loss_idx += 1\n\n if len(cached_grads) > 0:\n for p, cached_grad in zip(master_params(self._optimizer),\n cached_grads):\n if cached_grad is not None:\n p.grad.data.add_(cached_grad)\n cached_grads = []\n\n def _cur_loss_scaler(self):\n assert 0 <= self._loss_idx < self._num_loss\n return self._loss_scaler[self._loss_idx]\n\n def step(self, closure=None):\n if not self._amp_handle.is_active():\n return self._optimizer.step(closure=closure)\n\n self._loss_idx = 0\n\n for group in self._optimizer.param_groups:\n for p in group['params']:\n self._amp_handle.remove_cache(p)\n\n if closure is not None:\n raise NotImplementedError(\n 'The `closure` argument is unsupported by the amp ' +\n 'optimizer wrapper.')\n if any(self._skip_next):\n logger = logging.getLogger('apex.amp')\n logger.info('Gradient overflow, skipping update')\n self._skip_next = [False] * self._num_loss\n else:\n return self._optimizer.step(closure=closure)\n\n # Forward any attribute lookups\n def __getattr__(self, attr):\n return getattr(self._optimizer, attr)\n\n # Forward all torch.optim.Optimizer methods\n def __getstate__(self):\n return 
self._optimizer.__getstate__()\n\n def __setstate__(self):\n return self._optimizer.__setstate__()\n\n def __repr__(self):\n return self._optimizer.__repr__()\n\n def state_dict(self):\n return self._optimizer.state_dict()\n\n def load_state_dict(self, state_dict):\n return self._optimizer.load_state_dict(state_dict)\n\n def zero_grad(self):\n return self._optimizer.zero_grad()\n\n def add_param_group(self, param_group):\n return self._optimizer.add_param_group(param_group)\n", "path": "apex/amp/opt.py"}]}
num_tokens_prompt: 1,669
num_tokens_diff: 136

problem_id: gh_patches_debug_16422
source: rasdani/github-patches
task_type: git_diff
in_source_id: qtile__qtile-4133
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `ThermalSensor` doesn't respect `foreground` in `widget_defaults` ### The issue: (reported on Discord/IRC) ThermalSensor widget doesn't respect foreground colour in widget defaults. This is because the foreground value is copied to `foreground_normal` during `__init__` but this happens before `widget_defaults` are copied to the widget (which happens in `_configure` ### Required: - [X] I have searched past issues to see if this bug has already been reported. </issue> <code> [start of libqtile/widget/sensors.py] 1 # -*- coding:utf-8 -*- 2 # Copyright (c) 2012 TiN 3 # Copyright (c) 2012, 2014 Tycho Andersen 4 # Copyright (c) 2013 Tao Sauvage 5 # Copyright (c) 2014-2015 Sean Vig 6 # Copyright (c) 2014 Adi Sieker 7 # Copyright (c) 2014 Foster McLane 8 # 9 # Permission is hereby granted, free of charge, to any person obtaining a copy 10 # of this software and associated documentation files (the "Software"), to deal 11 # in the Software without restriction, including without limitation the rights 12 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 # copies of the Software, and to permit persons to whom the Software is 14 # furnished to do so, subject to the following conditions: 15 # 16 # The above copyright notice and this permission notice shall be included in 17 # all copies or substantial portions of the Software. 18 # 19 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 # SOFTWARE. 26 27 import psutil 28 29 from libqtile.widget import base 30 31 32 class ThermalSensor(base.InLoopPollText): 33 """Widget to display temperature sensor information 34 35 For using the thermal sensor widget you need to have lm-sensors installed. 36 You can get a list of the tag_sensors executing "sensors" in your terminal. 37 Then you can choose which you want, otherwise it will display the first 38 available. 39 40 Widget requirements: psutil_. 41 42 .. _psutil: https://pypi.org/project/psutil/ 43 """ 44 45 defaults = [ 46 ( 47 "format", 48 "{temp:.1f}{unit}", 49 "Display string format. Three options available: " 50 "``{temp}`` - temperature, " 51 "``{tag}`` - tag of the temperature sensor, and " 52 "``{unit}`` - °C or °F", 53 ), 54 ("metric", True, "True to use metric/C, False to use imperial/F"), 55 ("update_interval", 2, "Update interval in seconds"), 56 ("tag_sensor", None, 'Tag of the temperature sensor. 
For example: "temp1" or "Core 0"'), 57 ( 58 "threshold", 59 70, 60 "If the current temperature value is above, " 61 "then change to foreground_alert colour", 62 ), 63 ("foreground_alert", "ff0000", "Foreground colour alert"), 64 ] 65 66 def __init__(self, **config): 67 base.InLoopPollText.__init__(self, **config) 68 self.add_defaults(ThermalSensor.defaults) 69 temp_values = self.get_temp_sensors() 70 self.foreground_normal = self.foreground 71 72 if temp_values is None: 73 self.data = "sensors command not found" 74 elif len(temp_values) == 0: 75 self.data = "Temperature sensors not found" 76 elif self.tag_sensor is None: 77 for k in temp_values: 78 self.tag_sensor = k 79 break 80 81 def _configure(self, qtile, bar): 82 self.unit = "°C" if self.metric else "°F" 83 base.InLoopPollText._configure(self, qtile, bar) 84 85 def get_temp_sensors(self): 86 """ 87 Reads temperatures from sys-fs via psutil. 88 Output will be read Fahrenheit if user has specified it to be. 89 """ 90 91 temperature_list = {} 92 temps = psutil.sensors_temperatures(fahrenheit=not self.metric) 93 empty_index = 0 94 for kernel_module in temps: 95 for sensor in temps[kernel_module]: 96 label = sensor.label 97 if not label: 98 label = "{}-{}".format( 99 kernel_module if kernel_module else "UNKNOWN", str(empty_index) 100 ) 101 empty_index += 1 102 temperature_list[label] = sensor.current 103 104 return temperature_list 105 106 def poll(self): 107 temp_values = self.get_temp_sensors() 108 109 # Temperature not available 110 if (temp_values is None) or (self.tag_sensor not in temp_values): 111 return "N/A" 112 113 temp_value = temp_values.get(self.tag_sensor) 114 if temp_value > self.threshold: 115 self.layout.colour = self.foreground_alert 116 else: 117 self.layout.colour = self.foreground_normal 118 119 val = dict(temp=temp_value, tag=self.tag_sensor, unit=self.unit) 120 return self.format.format(**val) 121 [end of libqtile/widget/sensors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/widget/sensors.py b/libqtile/widget/sensors.py --- a/libqtile/widget/sensors.py +++ b/libqtile/widget/sensors.py @@ -67,7 +67,6 @@ base.InLoopPollText.__init__(self, **config) self.add_defaults(ThermalSensor.defaults) temp_values = self.get_temp_sensors() - self.foreground_normal = self.foreground if temp_values is None: self.data = "sensors command not found" @@ -81,6 +80,7 @@ def _configure(self, qtile, bar): self.unit = "°C" if self.metric else "°F" base.InLoopPollText._configure(self, qtile, bar) + self.foreground_normal = self.foreground def get_temp_sensors(self): """
{"golden_diff": "diff --git a/libqtile/widget/sensors.py b/libqtile/widget/sensors.py\n--- a/libqtile/widget/sensors.py\n+++ b/libqtile/widget/sensors.py\n@@ -67,7 +67,6 @@\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(ThermalSensor.defaults)\n temp_values = self.get_temp_sensors()\n- self.foreground_normal = self.foreground\n \n if temp_values is None:\n self.data = \"sensors command not found\"\n@@ -81,6 +80,7 @@\n def _configure(self, qtile, bar):\n self.unit = \"\u00b0C\" if self.metric else \"\u00b0F\"\n base.InLoopPollText._configure(self, qtile, bar)\n+ self.foreground_normal = self.foreground\n \n def get_temp_sensors(self):\n \"\"\"\n", "issue": "`ThermalSensor` doesn't respect `foreground` in `widget_defaults`\n### The issue:\n\n(reported on Discord/IRC)\r\n\r\nThermalSensor widget doesn't respect foreground colour in widget defaults. This is because the foreground value is copied to `foreground_normal` during `__init__` but this happens before `widget_defaults` are copied to the widget (which happens in `_configure`\n\n### Required:\n\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# -*- coding:utf-8 -*-\n# Copyright (c) 2012 TiN\n# Copyright (c) 2012, 2014 Tycho Andersen\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n# Copyright (c) 2014 Foster McLane\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport psutil\n\nfrom libqtile.widget import base\n\n\nclass ThermalSensor(base.InLoopPollText):\n \"\"\"Widget to display temperature sensor information\n\n For using the thermal sensor widget you need to have lm-sensors installed.\n You can get a list of the tag_sensors executing \"sensors\" in your terminal.\n Then you can choose which you want, otherwise it will display the first\n available.\n\n Widget requirements: psutil_.\n\n .. _psutil: https://pypi.org/project/psutil/\n \"\"\"\n\n defaults = [\n (\n \"format\",\n \"{temp:.1f}{unit}\",\n \"Display string format. Three options available: \"\n \"``{temp}`` - temperature, \"\n \"``{tag}`` - tag of the temperature sensor, and \"\n \"``{unit}`` - \u00b0C or \u00b0F\",\n ),\n (\"metric\", True, \"True to use metric/C, False to use imperial/F\"),\n (\"update_interval\", 2, \"Update interval in seconds\"),\n (\"tag_sensor\", None, 'Tag of the temperature sensor. 
For example: \"temp1\" or \"Core 0\"'),\n (\n \"threshold\",\n 70,\n \"If the current temperature value is above, \"\n \"then change to foreground_alert colour\",\n ),\n (\"foreground_alert\", \"ff0000\", \"Foreground colour alert\"),\n ]\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(ThermalSensor.defaults)\n temp_values = self.get_temp_sensors()\n self.foreground_normal = self.foreground\n\n if temp_values is None:\n self.data = \"sensors command not found\"\n elif len(temp_values) == 0:\n self.data = \"Temperature sensors not found\"\n elif self.tag_sensor is None:\n for k in temp_values:\n self.tag_sensor = k\n break\n\n def _configure(self, qtile, bar):\n self.unit = \"\u00b0C\" if self.metric else \"\u00b0F\"\n base.InLoopPollText._configure(self, qtile, bar)\n\n def get_temp_sensors(self):\n \"\"\"\n Reads temperatures from sys-fs via psutil.\n Output will be read Fahrenheit if user has specified it to be.\n \"\"\"\n\n temperature_list = {}\n temps = psutil.sensors_temperatures(fahrenheit=not self.metric)\n empty_index = 0\n for kernel_module in temps:\n for sensor in temps[kernel_module]:\n label = sensor.label\n if not label:\n label = \"{}-{}\".format(\n kernel_module if kernel_module else \"UNKNOWN\", str(empty_index)\n )\n empty_index += 1\n temperature_list[label] = sensor.current\n\n return temperature_list\n\n def poll(self):\n temp_values = self.get_temp_sensors()\n\n # Temperature not available\n if (temp_values is None) or (self.tag_sensor not in temp_values):\n return \"N/A\"\n\n temp_value = temp_values.get(self.tag_sensor)\n if temp_value > self.threshold:\n self.layout.colour = self.foreground_alert\n else:\n self.layout.colour = self.foreground_normal\n\n val = dict(temp=temp_value, tag=self.tag_sensor, unit=self.unit)\n return self.format.format(**val)\n", "path": "libqtile/widget/sensors.py"}]}
num_tokens_prompt: 1,944
num_tokens_diff: 191

problem_id: gh_patches_debug_3993
source: rasdani/github-patches
task_type: git_diff
in_source_id: openstates__openstates-scrapers-382
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> NY: Committees don't have members Taken from bug #195 http://openstates.org/browse/ny/committees/ http://www.nysenate.gov/committee/state-native-american-relations http://assembly.state.ny.us/comm/?sec=mem&id=60 </issue> <code> [start of openstates/ny/committees.py] 1 import re 2 3 from billy.scrape import NoDataForPeriod 4 from billy.scrape.committees import CommitteeScraper, Committee 5 6 import lxml.html 7 8 9 def parse_name(name): 10 """ 11 Split a committee membership string into name and role. 12 13 >>> parse_name('Felix Ortiz') 14 ('Felix Ortiz', 'member') 15 >>> parse_name('Felix Ortiz (Chair)') 16 ('Felix Ortiz', 'chair') 17 >>> parse_name('Hon. Felix Ortiz, Co-Chair') 18 ('Felix Ortiz', 'co-chair') 19 >>> parse_name('Owen H.\\r\\nJohnson (Vice Chairperson)') 20 ('Owen H. Johnson', 'vice chairperson') 21 """ 22 name = re.sub(r'^(Hon\.|Assemblyman|Assemblywoman)\s+', '', name) 23 name = re.sub(r'\s+', ' ', name) 24 25 roles = ["Chairwoman", "Chairperson", "Chair", "Secretary", "Treasurer", 26 "Parliamentarian", "Chaplain"] 27 match = re.match( 28 r'([^(]+),? \(?((Co|Vice)?-?\s*(%s))\)?' % '|'.join(roles), 29 name) 30 31 if match: 32 name = match.group(1).strip(' ,') 33 role = match.group(2).lower() 34 return (name, role) 35 return (name, 'member') 36 37 38 class NYCommitteeScraper(CommitteeScraper): 39 state = "ny" 40 latest_only = True 41 42 def scrape(self, chamber, term): 43 getattr(self, 'scrape_' + chamber)() 44 45 def scrape_lower(self, only_names=None): 46 committees = [] 47 url = "http://assembly.state.ny.us/comm/" 48 page = self.urlopen(url) 49 page = lxml.html.fromstring(page) 50 page.make_links_absolute(url) 51 52 for link in page.xpath("//a[contains(@href, 'sec=mem')]"): 53 name = link.xpath("string(../strong)").strip() 54 if 'Caucus' in name: 55 continue 56 57 url = link.attrib['href'] 58 59 committees.append(name) 60 61 self.scrape_lower_committee(name, url) 62 return committees 63 64 def scrape_lower_committee(self, name, url): 65 page = self.urlopen(url) 66 page = lxml.html.fromstring(page) 67 68 comm = Committee('lower', name) 69 comm.add_source(url) 70 71 for link in page.xpath("//a[contains(@href, 'mem?ad')]"): 72 member = link.text.strip() 73 member = re.sub(r'\s+', ' ', member) 74 75 name, role = parse_name(member) 76 comm.add_member(name, role) 77 78 self.save_committee(comm) 79 80 def scrape_upper(self): 81 committees = [] 82 url = "http://www.nysenate.gov/committees" 83 page = self.urlopen(url) 84 page = lxml.html.fromstring(page) 85 page.make_links_absolute(url) 86 87 for link in page.xpath("//a[contains(@href, '/committee/')]"): 88 name = link.text.strip() 89 90 if name == 'New York State Conference of Black Senators': 91 # stop scraping once we reach the caucuses 92 break 93 94 committees.append(name) 95 self.scrape_upper_committee(name, link.attrib['href']) 96 return committees 97 98 def scrape_upper_committee(self, name, url): 99 page = self.urlopen(url) 100 page = lxml.html.fromstring(page) 101 102 comm = Committee('upper', name) 103 comm.add_source(url) 104 105 member_div = page.xpath("//div[@class = 'committee-members']")[0] 106 107 seen = set() 108 for link in member_div.xpath(".//a"): 109 if not link.text: 110 continue 111 112 member = link.text.strip() 113 114 next_elem = link.getnext() 115 if (next_elem is not None and 116 next_elem.tag == 'a' and 117 next_elem.attrib['href'] == link.attrib['href']): 118 # Sometimes NY is cool and splits 
names across a 119 # couple links 120 member = "%s %s" % (member, next_elem.text.strip()) 121 122 member = re.sub(r'\s+', ' ', member) 123 124 if member in seen or not member: 125 continue 126 seen.add(member) 127 128 name, role = parse_name(member) 129 comm.add_member(name, role) 130 131 self.save_committee(comm) 132 [end of openstates/ny/committees.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/openstates/ny/committees.py b/openstates/ny/committees.py --- a/openstates/ny/committees.py +++ b/openstates/ny/committees.py @@ -68,7 +68,7 @@ comm = Committee('lower', name) comm.add_source(url) - for link in page.xpath("//a[contains(@href, 'mem?ad')]"): + for link in page.xpath("//div[@class='commlinks']//a[contains(@href, 'mem')]"): member = link.text.strip() member = re.sub(r'\s+', ' ', member)
{"golden_diff": "diff --git a/openstates/ny/committees.py b/openstates/ny/committees.py\n--- a/openstates/ny/committees.py\n+++ b/openstates/ny/committees.py\n@@ -68,7 +68,7 @@\n comm = Committee('lower', name)\n comm.add_source(url)\n \n- for link in page.xpath(\"//a[contains(@href, 'mem?ad')]\"):\n+ for link in page.xpath(\"//div[@class='commlinks']//a[contains(@href, 'mem')]\"):\n member = link.text.strip()\n member = re.sub(r'\\s+', ' ', member)\n", "issue": "NY: Committees don't have members\nTaken from bug #195\n\nhttp://openstates.org/browse/ny/committees/\nhttp://www.nysenate.gov/committee/state-native-american-relations\nhttp://assembly.state.ny.us/comm/?sec=mem&id=60\n\n", "before_files": [{"content": "import re\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\ndef parse_name(name):\n \"\"\"\n Split a committee membership string into name and role.\n\n >>> parse_name('Felix Ortiz')\n ('Felix Ortiz', 'member')\n >>> parse_name('Felix Ortiz (Chair)')\n ('Felix Ortiz', 'chair')\n >>> parse_name('Hon. Felix Ortiz, Co-Chair')\n ('Felix Ortiz', 'co-chair')\n >>> parse_name('Owen H.\\\\r\\\\nJohnson (Vice Chairperson)')\n ('Owen H. Johnson', 'vice chairperson')\n \"\"\"\n name = re.sub(r'^(Hon\\.|Assemblyman|Assemblywoman)\\s+', '', name)\n name = re.sub(r'\\s+', ' ', name)\n\n roles = [\"Chairwoman\", \"Chairperson\", \"Chair\", \"Secretary\", \"Treasurer\",\n \"Parliamentarian\", \"Chaplain\"]\n match = re.match(\n r'([^(]+),? \\(?((Co|Vice)?-?\\s*(%s))\\)?' % '|'.join(roles),\n name)\n\n if match:\n name = match.group(1).strip(' ,')\n role = match.group(2).lower()\n return (name, role)\n return (name, 'member')\n\n\nclass NYCommitteeScraper(CommitteeScraper):\n state = \"ny\"\n latest_only = True\n\n def scrape(self, chamber, term):\n getattr(self, 'scrape_' + chamber)()\n\n def scrape_lower(self, only_names=None):\n committees = []\n url = \"http://assembly.state.ny.us/comm/\"\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, 'sec=mem')]\"):\n name = link.xpath(\"string(../strong)\").strip()\n if 'Caucus' in name:\n continue\n\n url = link.attrib['href']\n\n committees.append(name)\n\n self.scrape_lower_committee(name, url)\n return committees\n\n def scrape_lower_committee(self, name, url):\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n\n comm = Committee('lower', name)\n comm.add_source(url)\n\n for link in page.xpath(\"//a[contains(@href, 'mem?ad')]\"):\n member = link.text.strip()\n member = re.sub(r'\\s+', ' ', member)\n\n name, role = parse_name(member)\n comm.add_member(name, role)\n\n self.save_committee(comm)\n\n def scrape_upper(self):\n committees = []\n url = \"http://www.nysenate.gov/committees\"\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, '/committee/')]\"):\n name = link.text.strip()\n\n if name == 'New York State Conference of Black Senators':\n # stop scraping once we reach the caucuses\n break\n\n committees.append(name)\n self.scrape_upper_committee(name, link.attrib['href'])\n return committees\n\n def scrape_upper_committee(self, name, url):\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n\n comm = Committee('upper', name)\n comm.add_source(url)\n\n member_div = page.xpath(\"//div[@class = 'committee-members']\")[0]\n\n seen = set()\n for link in 
member_div.xpath(\".//a\"):\n if not link.text:\n continue\n\n member = link.text.strip()\n\n next_elem = link.getnext()\n if (next_elem is not None and\n next_elem.tag == 'a' and\n next_elem.attrib['href'] == link.attrib['href']):\n # Sometimes NY is cool and splits names across a\n # couple links\n member = \"%s %s\" % (member, next_elem.text.strip())\n\n member = re.sub(r'\\s+', ' ', member)\n\n if member in seen or not member:\n continue\n seen.add(member)\n\n name, role = parse_name(member)\n comm.add_member(name, role)\n\n self.save_committee(comm)\n", "path": "openstates/ny/committees.py"}]}
num_tokens_prompt: 1,840
num_tokens_diff: 138

problem_id: gh_patches_debug_20787
source: rasdani/github-patches
task_type: git_diff
in_source_id: saulpw__visidata-2185
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> inputMultiple() displays a malformed empty status box **Small description** `search-keys` and `search-col` displays an empty box while waiting for the user to input a regex at the prompt. The box is shown where the usual status messages are shown. **Expected result** No box should be displayed. **Actual result with screenshot** ![sidebar-inputMultiple](https://github.com/saulpw/visidata/assets/67946319/1b068536-bd38-4953-bfc0-640920380ded) For comparison, here is what the normal status messages look like: ![sidebar-normal](https://github.com/saulpw/visidata/assets/67946319/de49d377-aaab-4b1b-9ac3-56906b7c53a3) **Steps to reproduce with sample data and a .vd** Open any sheet and hit `r` or `/`. **Additional context** saul.pw/VisiData v3.0dev Python 3.10.12 Ubuntu 22.04.3 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 3 from setuptools import setup 4 # tox can't actually run python3 setup.py: https://github.com/tox-dev/tox/issues/96 5 #from visidata import __version__ 6 __version__ = '3.0dev' 7 8 setup(name='visidata', 9 version=__version__, 10 description='terminal interface for exploring and arranging tabular data', 11 long_description=open('README.md').read(), 12 long_description_content_type='text/markdown', 13 author='Saul Pwanson', 14 python_requires='>=3.7', 15 author_email='[email protected]', 16 url='https://visidata.org', 17 download_url='https://github.com/saulpw/visidata/tarball/' + __version__, 18 scripts=['bin/vd'], 19 entry_points={'console_scripts': [ 20 'visidata=visidata.main:vd_cli' 21 ], 22 }, 23 py_modules=['visidata'], 24 install_requires=[ 25 'python-dateutil', 26 'windows-curses != 2.3.1; platform_system == "Windows"', #1841 27 'importlib-metadata >= 3.6', 28 'importlib_resources; python_version<"3.9"' 29 ], 30 packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'], 31 data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])], 32 package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']}, 33 license='GPLv3', 34 classifiers=[ 35 'Development Status :: 5 - Production/Stable', 36 'Environment :: Console', 37 'Environment :: Console :: Curses', 38 'Intended Audience :: Developers', 39 'Intended Audience :: Science/Research', 40 'Intended Audience :: System Administrators', 41 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 42 'Operating System :: OS Independent', 43 'Programming Language :: Python :: 3', 44 'Topic :: Database :: Front-Ends', 45 'Topic :: Scientific/Engineering', 46 'Topic :: Office/Business :: Financial :: Spreadsheet', 47 'Topic :: Scientific/Engineering :: Visualization', 48 'Topic :: Utilities', 49 ], 50 keywords=('console tabular data spreadsheet terminal viewer textpunk' 51 'curses csv hdf5 h5 xlsx excel tsv'), 52 ) 53 54 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -29,7 +29,7 @@ ], packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'], data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])], - package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']}, + package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw', 'regex.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']}, license='GPLv3', classifiers=[ 'Development Status :: 5 - Production/Stable',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,7 @@\n ],\n packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],\n data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])],\n- package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},\n+ package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw', 'regex.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},\n license='GPLv3',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n", "issue": "inputMultiple() displays a malformed empty status box\n**Small description**\r\n`search-keys` and `search-col` displays an empty box while waiting for the user to input a regex at the prompt. The box is shown where the usual status messages are shown.\r\n\r\n**Expected result**\r\nNo box should be displayed.\r\n\r\n**Actual result with screenshot**\r\n![sidebar-inputMultiple](https://github.com/saulpw/visidata/assets/67946319/1b068536-bd38-4953-bfc0-640920380ded)\r\n\r\nFor comparison, here is what the normal status messages look like:\r\n![sidebar-normal](https://github.com/saulpw/visidata/assets/67946319/de49d377-aaab-4b1b-9ac3-56906b7c53a3)\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\nOpen any sheet and hit `r` or `/`.\r\n\r\n**Additional context**\r\nsaul.pw/VisiData v3.0dev\r\nPython 3.10.12\r\nUbuntu 22.04.3\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom setuptools import setup\n# tox can't actually run python3 setup.py: https://github.com/tox-dev/tox/issues/96\n#from visidata import __version__\n__version__ = '3.0dev'\n\nsetup(name='visidata',\n version=__version__,\n description='terminal interface for exploring and arranging tabular data',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author='Saul Pwanson',\n python_requires='>=3.7',\n author_email='[email protected]',\n url='https://visidata.org',\n download_url='https://github.com/saulpw/visidata/tarball/' + __version__,\n scripts=['bin/vd'],\n entry_points={'console_scripts': [\n 'visidata=visidata.main:vd_cli'\n ],\n },\n py_modules=['visidata'],\n install_requires=[\n 'python-dateutil',\n 'windows-curses != 2.3.1; platform_system == \"Windows\"', #1841\n 'importlib-metadata >= 3.6',\n 'importlib_resources; python_version<\"3.9\"'\n ],\n packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],\n data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])],\n package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},\n license='GPLv3',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: Developers',\n 'Intended Audience :: 
Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Utilities',\n ],\n keywords=('console tabular data spreadsheet terminal viewer textpunk'\n 'curses csv hdf5 h5 xlsx excel tsv'),\n )\n\n", "path": "setup.py"}]}
num_tokens_prompt: 1,495
num_tokens_diff: 276

problem_id: gh_patches_debug_19917
source: rasdani/github-patches
task_type: git_diff
in_source_id: sbi-dev__sbi-646
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SNLE + multiple independent prior + MCMC with num_workers > 1 throws error SNLE: - multiple independent prior + num_workers=2 leads to an error (ValueError: Expected value argument to be within the support of the distribution Uniform) - no error if either num_workers=1 or using a BoxUniform prior SNPE: - seems to work ``` from sbi.inference.snpe import SNPE_A from sbi.inference.snle import SNLE_A from sbi.utils import BoxUniform from sbi.inference.posteriors import MCMCPosterior from sbi.inference.potentials import posterior_estimator_based_potential, likelihood_estimator_based_potential from sbi.utils.user_input_checks import process_prior from sbi.analysis.sbc import run_sbc import torch default_params = torch.rand(size=(4, 3),dtype=torch.float32) default_obs = torch.rand(size=(4, 1), dtype=torch.float32) # multiple independent prior prior = [torch.distributions.Uniform(torch.FloatTensor([0]), torch.FloatTensor([20])), torch.distributions.Uniform(torch.FloatTensor([-10]), torch.FloatTensor([10])), torch.distributions.Uniform(torch.FloatTensor([0.5]), torch.FloatTensor([3]))] # box uniform prior # prior = BoxUniform(torch.Tensor([0, -10, 0.5]), torch.Tensor([20, 10, 3])) prior, _, _ = process_prior(prior) # inference = SNPE_A() # density_estimator = inference.append_simulations(default_params, default_obs).train() # potential_fn, theta_transform = posterior_estimator_based_potential(density_estimator, prior, default_obs[0]) inference = SNLE_A() density_estimator = inference.append_simulations(default_params, default_obs).train() potential_fn, theta_transform = likelihood_estimator_based_potential(density_estimator, prior, default_obs[0]) posterior = MCMCPosterior(potential_fn, proposal=prior, theta_transform=theta_transform) # this line throws an error ranks, dap_samples = run_sbc(default_params, default_obs, posterior, num_posterior_samples=10, num_workers=2, sbc_batch_size=2) ``` </issue> <code> [start of sbi/samplers/mcmc/init_strategy.py] 1 # This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed 2 # under the Affero General Public License v3, see <https://www.gnu.org/licenses/>. 3 4 from typing import Any, Callable 5 6 import torch 7 import torch.distributions.transforms as torch_tf 8 from torch import Tensor 9 10 11 class IterateParameters: 12 """Iterates through parameters by rows""" 13 14 def __init__(self, parameters: torch.Tensor, **kwargs): 15 self.iter = self._make_iterator(parameters) 16 17 @staticmethod 18 def _make_iterator(t): 19 for i in range(t.shape[0]): 20 yield t[i, :].reshape(1, -1) 21 22 def __call__(self): 23 return next(self.iter) 24 25 26 def proposal_init( 27 proposal: Any, transform: torch_tf.Transform, **kwargs: Any 28 ) -> Tensor: 29 """Return a sample from the proposal.""" 30 prior_samples = proposal.sample((1,)).detach() 31 transformed_prior_samples = transform(prior_samples) 32 return transformed_prior_samples 33 34 35 def sir( 36 proposal: Any, 37 potential_fn: Callable, 38 transform: torch_tf.Transform, 39 sir_num_batches: int = 10, 40 sir_batch_size: int = 1000, 41 **kwargs: Any, 42 ) -> Tensor: 43 r"""Return a sample obtained by sequential importance reweighting. 44 45 See Rubin 1988, "Using the sir algorithm to simulate posterior distributions." 46 47 This function can also do `SIR` on the conditional posterior 48 $p(\theta_i|\theta_j, x)$ when a `condition` and `dims_to_sample` are passed. 
49 50 Args: 51 proposal: Proposal distribution, candidate samples are drawn from it. 52 potential_fn: Potential function that the candidate samples are weighted with. 53 Note that the function needs to return log probabilities. 54 sir_num_batches: Number of candidate batches drawn. 55 sir_batch_size: Batch size used for evaluating candidates. 56 57 Returns: 58 A single sample. 59 """ 60 61 with torch.set_grad_enabled(False): 62 log_weights = [] 63 init_param_candidates = [] 64 for i in range(sir_num_batches): 65 batch_draws = proposal.sample((sir_batch_size,)).detach() 66 transformed_batch_draws = transform(batch_draws) 67 init_param_candidates.append(transformed_batch_draws) 68 log_weights.append(potential_fn(transformed_batch_draws).detach()) 69 log_weights = torch.cat(log_weights) 70 init_param_candidates = torch.cat(init_param_candidates) 71 72 # Norm weights in log space 73 log_weights -= torch.logsumexp(log_weights, dim=0) 74 probs = torch.exp(log_weights.view(-1)) 75 probs[torch.isnan(probs)] = 0.0 76 probs[torch.isinf(probs)] = 0.0 77 probs /= probs.sum() 78 79 idxs = torch.multinomial(probs, 1, replacement=False) 80 return init_param_candidates[idxs, :] 81 [end of sbi/samplers/mcmc/init_strategy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sbi/samplers/mcmc/init_strategy.py b/sbi/samplers/mcmc/init_strategy.py --- a/sbi/samplers/mcmc/init_strategy.py +++ b/sbi/samplers/mcmc/init_strategy.py @@ -63,9 +63,8 @@ init_param_candidates = [] for i in range(sir_num_batches): batch_draws = proposal.sample((sir_batch_size,)).detach() - transformed_batch_draws = transform(batch_draws) - init_param_candidates.append(transformed_batch_draws) - log_weights.append(potential_fn(transformed_batch_draws).detach()) + init_param_candidates.append(batch_draws) + log_weights.append(potential_fn(batch_draws).detach()) log_weights = torch.cat(log_weights) init_param_candidates = torch.cat(init_param_candidates) @@ -77,4 +76,5 @@ probs /= probs.sum() idxs = torch.multinomial(probs, 1, replacement=False) - return init_param_candidates[idxs, :] + # Return transformed sample. + return transform(init_param_candidates[idxs, :])
{"golden_diff": "diff --git a/sbi/samplers/mcmc/init_strategy.py b/sbi/samplers/mcmc/init_strategy.py\n--- a/sbi/samplers/mcmc/init_strategy.py\n+++ b/sbi/samplers/mcmc/init_strategy.py\n@@ -63,9 +63,8 @@\n init_param_candidates = []\n for i in range(sir_num_batches):\n batch_draws = proposal.sample((sir_batch_size,)).detach()\n- transformed_batch_draws = transform(batch_draws)\n- init_param_candidates.append(transformed_batch_draws)\n- log_weights.append(potential_fn(transformed_batch_draws).detach())\n+ init_param_candidates.append(batch_draws)\n+ log_weights.append(potential_fn(batch_draws).detach())\n log_weights = torch.cat(log_weights)\n init_param_candidates = torch.cat(init_param_candidates)\n \n@@ -77,4 +76,5 @@\n probs /= probs.sum()\n \n idxs = torch.multinomial(probs, 1, replacement=False)\n- return init_param_candidates[idxs, :]\n+ # Return transformed sample.\n+ return transform(init_param_candidates[idxs, :])\n", "issue": "SNLE + multiple independent prior + MCMC with num_workers > 1 throws error\nSNLE:\r\n- multiple independent prior + num_workers=2 leads to an error (ValueError: Expected value argument to be within the support of the distribution Uniform)\r\n- no error if either num_workers=1 or using a BoxUniform prior\r\n\r\nSNPE:\r\n- seems to work\r\n\r\n```\r\nfrom sbi.inference.snpe import SNPE_A\r\nfrom sbi.inference.snle import SNLE_A\r\nfrom sbi.utils import BoxUniform\r\nfrom sbi.inference.posteriors import MCMCPosterior\r\nfrom sbi.inference.potentials import posterior_estimator_based_potential, likelihood_estimator_based_potential\r\nfrom sbi.utils.user_input_checks import process_prior\r\nfrom sbi.analysis.sbc import run_sbc\r\nimport torch\r\n\r\ndefault_params = torch.rand(size=(4, 3),dtype=torch.float32)\r\ndefault_obs = torch.rand(size=(4, 1), dtype=torch.float32)\r\n\r\n# multiple independent prior\r\nprior = [torch.distributions.Uniform(torch.FloatTensor([0]), torch.FloatTensor([20])),\r\n torch.distributions.Uniform(torch.FloatTensor([-10]), torch.FloatTensor([10])),\r\n torch.distributions.Uniform(torch.FloatTensor([0.5]), torch.FloatTensor([3]))]\r\n# box uniform prior\r\n# prior = BoxUniform(torch.Tensor([0, -10, 0.5]), torch.Tensor([20, 10, 3]))\r\nprior, _, _ = process_prior(prior)\r\n\r\n# inference = SNPE_A()\r\n# density_estimator = inference.append_simulations(default_params, default_obs).train()\r\n# potential_fn, theta_transform = posterior_estimator_based_potential(density_estimator, prior, default_obs[0])\r\n\r\ninference = SNLE_A()\r\ndensity_estimator = inference.append_simulations(default_params, default_obs).train()\r\npotential_fn, theta_transform = likelihood_estimator_based_potential(density_estimator, prior, default_obs[0])\r\n\r\nposterior = MCMCPosterior(potential_fn, proposal=prior, theta_transform=theta_transform)\r\n\r\n# this line throws an error\r\nranks, dap_samples = run_sbc(default_params, default_obs, posterior, num_posterior_samples=10, num_workers=2, sbc_batch_size=2)\r\n```\r\n\n", "before_files": [{"content": "# This file is part of sbi, a toolkit for simulation-based inference. 
sbi is licensed\n# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.\n\nfrom typing import Any, Callable\n\nimport torch\nimport torch.distributions.transforms as torch_tf\nfrom torch import Tensor\n\n\nclass IterateParameters:\n \"\"\"Iterates through parameters by rows\"\"\"\n\n def __init__(self, parameters: torch.Tensor, **kwargs):\n self.iter = self._make_iterator(parameters)\n\n @staticmethod\n def _make_iterator(t):\n for i in range(t.shape[0]):\n yield t[i, :].reshape(1, -1)\n\n def __call__(self):\n return next(self.iter)\n\n\ndef proposal_init(\n proposal: Any, transform: torch_tf.Transform, **kwargs: Any\n) -> Tensor:\n \"\"\"Return a sample from the proposal.\"\"\"\n prior_samples = proposal.sample((1,)).detach()\n transformed_prior_samples = transform(prior_samples)\n return transformed_prior_samples\n\n\ndef sir(\n proposal: Any,\n potential_fn: Callable,\n transform: torch_tf.Transform,\n sir_num_batches: int = 10,\n sir_batch_size: int = 1000,\n **kwargs: Any,\n) -> Tensor:\n r\"\"\"Return a sample obtained by sequential importance reweighting.\n\n See Rubin 1988, \"Using the sir algorithm to simulate posterior distributions.\"\n\n This function can also do `SIR` on the conditional posterior\n $p(\\theta_i|\\theta_j, x)$ when a `condition` and `dims_to_sample` are passed.\n\n Args:\n proposal: Proposal distribution, candidate samples are drawn from it.\n potential_fn: Potential function that the candidate samples are weighted with.\n Note that the function needs to return log probabilities.\n sir_num_batches: Number of candidate batches drawn.\n sir_batch_size: Batch size used for evaluating candidates.\n\n Returns:\n A single sample.\n \"\"\"\n\n with torch.set_grad_enabled(False):\n log_weights = []\n init_param_candidates = []\n for i in range(sir_num_batches):\n batch_draws = proposal.sample((sir_batch_size,)).detach()\n transformed_batch_draws = transform(batch_draws)\n init_param_candidates.append(transformed_batch_draws)\n log_weights.append(potential_fn(transformed_batch_draws).detach())\n log_weights = torch.cat(log_weights)\n init_param_candidates = torch.cat(init_param_candidates)\n\n # Norm weights in log space\n log_weights -= torch.logsumexp(log_weights, dim=0)\n probs = torch.exp(log_weights.view(-1))\n probs[torch.isnan(probs)] = 0.0\n probs[torch.isinf(probs)] = 0.0\n probs /= probs.sum()\n\n idxs = torch.multinomial(probs, 1, replacement=False)\n return init_param_candidates[idxs, :]\n", "path": "sbi/samplers/mcmc/init_strategy.py"}]}
1,805
249
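The sbi diff above keeps the SIR candidates in the proposal's own space, evaluates the potential there, and only transforms the single sample that wins the resampling step. A minimal sketch of that selection logic, with a hypothetical Gaussian proposal, quadratic potential and identity transform standing in for the real sbi objects:

```python
import torch

def sir_select(draw, log_potential, transform, num_batches=2, batch_size=5):
    """Importance-resample one candidate; transform only the returned sample."""
    log_weights, candidates = [], []
    for _ in range(num_batches):
        batch = draw(batch_size)                   # candidates stay untransformed
        candidates.append(batch)
        log_weights.append(log_potential(batch))   # potential evaluated in that same space
    log_weights = torch.cat(log_weights)
    candidates = torch.cat(candidates)

    probs = torch.exp(log_weights - torch.logsumexp(log_weights, dim=0))
    probs = torch.nan_to_num(probs, nan=0.0, posinf=0.0)
    probs = probs / probs.sum()

    idx = torch.multinomial(probs, 1)
    return transform(candidates[idx, :])           # transform applied once, at the end

draw = lambda n: torch.randn(n, 3)                 # hypothetical proposal
log_pot = lambda theta: -(theta ** 2).sum(dim=1)   # hypothetical potential
print(sir_select(draw, log_pot, lambda t: t).shape)  # torch.Size([1, 3])
```

Evaluating the potential on already-transformed draws can push values outside the prior's support, which is consistent with the Uniform-support errors reported in the issue for this row.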
gh_patches_debug_16978
rasdani/github-patches
git_diff
fossasia__open-event-server-3183
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Mail: New Session Proposals not Sent out to Organizers and Co-organizers After the submission of several people the new Session Proposals have not been sent out to Organizers and Co-organizers. One reason could be, that these people did not verify their email address. Please check configuration and ensure all emails of all submissions are sent to organizers/co-organizers. </issue> <code> [start of app/helpers/notification_email_triggers.py] 1 from flask import url_for 2 3 from app.helpers.data_getter import DataGetter 4 from app.helpers.helpers import send_new_session_organizer, send_notif_new_session_organizer, \ 5 send_notif_session_accept_reject, send_session_accept_reject, send_schedule_change, send_notif_session_schedule, \ 6 send_email_for_after_purchase_organizers, send_notif_for_after_purchase_organizer 7 from app.models.mail import NEW_SESSION, SESSION_ACCEPT_REJECT, SESSION_SCHEDULE, TICKET_PURCHASED 8 9 10 def trigger_new_session_notifications(session_id, event_id=None, event=None): 11 if not event and not event_id: 12 raise Exception('event or event_id is required') 13 if not event: 14 event = DataGetter.get_event(event_id) 15 16 link = url_for('event_sessions.session_display_view', 17 event_id=event.id, session_id=session_id, _external=True) 18 19 admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION) 20 organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer') 21 for organizer in organizers: 22 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id) 23 if not admin_msg_setting or \ 24 (email_notification_setting and email_notification_setting.new_paper == 1 and 25 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0: 26 27 send_new_session_organizer(organizer.user.email, event.name, link) 28 # Send notification 29 send_notif_new_session_organizer(organizer.user, event.name, link) 30 31 32 def trigger_session_state_change_notifications(session, event_id, state=None, message=None, subject=None): 33 if not state: 34 state = session.state 35 link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True) 36 admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_ACCEPT_REJECT) 37 for speaker in session.speakers: 38 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id) 39 if not admin_msg_setting or \ 40 (email_notification_setting and email_notification_setting.session_accept_reject == 1 and 41 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0: 42 43 if speaker.email: 44 send_session_accept_reject(speaker.email, session.title, state, link, subject=subject, message=message) 45 # Send notification 46 if speaker.user: 47 send_notif_session_accept_reject(speaker.user, session.title, state, link) 48 session.state_email_sent = True 49 from app.helpers.data import save_to_db 50 save_to_db(session) 51 52 53 def trigger_session_schedule_change_notifications(session, event_id): 54 link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True) 55 admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_SCHEDULE) 56 for speaker in session.speakers: 57 email_notification_setting = 
DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id) 58 if not admin_msg_setting or \ 59 (email_notification_setting and email_notification_setting.session_schedule == 1 and 60 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0: 61 if speaker.email: 62 send_schedule_change(speaker.email, session.title, link) 63 # Send notification 64 if speaker.user: 65 send_notif_session_schedule(speaker.user, session.title, link) 66 67 68 def trigger_after_purchase_notifications(buyer_email, event_id, event, invoice_id, order_url): 69 if not event and not event_id: 70 raise Exception('event or event_id is required') 71 if not event: 72 event = DataGetter.get_event(event_id) 73 74 admin_msg_setting = DataGetter.get_message_setting_by_action(TICKET_PURCHASED) 75 organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer') 76 for organizer in organizers: 77 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id) 78 if not admin_msg_setting or \ 79 (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and 80 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0: 81 send_email_for_after_purchase_organizers(organizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name) 82 send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email) 83 84 coorganizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'coorganizer') 85 for coorganizer in coorganizers: 86 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(coorganizer.user.id, event_id) 87 if not admin_msg_setting or \ 88 (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and 89 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0: 90 send_email_for_after_purchase_organizers(coorganizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name) 91 send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email) 92 [end of app/helpers/notification_email_triggers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/helpers/notification_email_triggers.py b/app/helpers/notification_email_triggers.py --- a/app/helpers/notification_email_triggers.py +++ b/app/helpers/notification_email_triggers.py @@ -19,7 +19,7 @@ admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION) organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer') for organizer in organizers: - email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id) + email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event.id) if not admin_msg_setting or \ (email_notification_setting and email_notification_setting.new_paper == 1 and admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:
{"golden_diff": "diff --git a/app/helpers/notification_email_triggers.py b/app/helpers/notification_email_triggers.py\n--- a/app/helpers/notification_email_triggers.py\n+++ b/app/helpers/notification_email_triggers.py\n@@ -19,7 +19,7 @@\n admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION)\n organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')\n for organizer in organizers:\n- email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)\n+ email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event.id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.new_paper == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n", "issue": "Mail: New Session Proposals not Sent out to Organizers and Co-organizers\nAfter the submission of several people the new Session Proposals have not been sent out to Organizers and Co-organizers. One reason could be, that these people did not verify their email address.\r\n\r\nPlease check configuration and ensure all emails of all submissions are sent to organizers/co-organizers.\n", "before_files": [{"content": "from flask import url_for\n\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import send_new_session_organizer, send_notif_new_session_organizer, \\\n send_notif_session_accept_reject, send_session_accept_reject, send_schedule_change, send_notif_session_schedule, \\\n send_email_for_after_purchase_organizers, send_notif_for_after_purchase_organizer\nfrom app.models.mail import NEW_SESSION, SESSION_ACCEPT_REJECT, SESSION_SCHEDULE, TICKET_PURCHASED\n\n\ndef trigger_new_session_notifications(session_id, event_id=None, event=None):\n if not event and not event_id:\n raise Exception('event or event_id is required')\n if not event:\n event = DataGetter.get_event(event_id)\n\n link = url_for('event_sessions.session_display_view',\n event_id=event.id, session_id=session_id, _external=True)\n\n admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION)\n organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')\n for organizer in organizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.new_paper == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n\n send_new_session_organizer(organizer.user.email, event.name, link)\n # Send notification\n send_notif_new_session_organizer(organizer.user, event.name, link)\n\n\ndef trigger_session_state_change_notifications(session, event_id, state=None, message=None, subject=None):\n if not state:\n state = session.state\n link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True)\n admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_ACCEPT_REJECT)\n for speaker in session.speakers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.session_accept_reject == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n\n if speaker.email:\n 
send_session_accept_reject(speaker.email, session.title, state, link, subject=subject, message=message)\n # Send notification\n if speaker.user:\n send_notif_session_accept_reject(speaker.user, session.title, state, link)\n session.state_email_sent = True\n from app.helpers.data import save_to_db\n save_to_db(session)\n\n\ndef trigger_session_schedule_change_notifications(session, event_id):\n link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True)\n admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_SCHEDULE)\n for speaker in session.speakers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.session_schedule == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n if speaker.email:\n send_schedule_change(speaker.email, session.title, link)\n # Send notification\n if speaker.user:\n send_notif_session_schedule(speaker.user, session.title, link)\n\n\ndef trigger_after_purchase_notifications(buyer_email, event_id, event, invoice_id, order_url):\n if not event and not event_id:\n raise Exception('event or event_id is required')\n if not event:\n event = DataGetter.get_event(event_id)\n\n admin_msg_setting = DataGetter.get_message_setting_by_action(TICKET_PURCHASED)\n organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')\n for organizer in organizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n send_email_for_after_purchase_organizers(organizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name)\n send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email)\n\n coorganizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'coorganizer')\n for coorganizer in coorganizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(coorganizer.user.id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n send_email_for_after_purchase_organizers(coorganizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name)\n send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email)\n", "path": "app/helpers/notification_email_triggers.py"}]}
1,941
188
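The one-line fix above (event_id replaced by event.id) matters because trigger_new_session_notifications can be called with an event object and no event_id, so every per-organizer settings lookup keyed on the raw argument quietly misses and the mail is skipped. A hedged illustration of that failure mode with stand-in objects (names here are invented, not the actual Open Event models):

```python
class Event:
    def __init__(self, id):
        self.id = id

settings_by_event = {42: {"new_paper": 1}}   # pretend notification-settings table

def get_settings(user_id, event_id):
    return settings_by_event.get(event_id)

def notify(event_id=None, event=None):
    if not event:
        event = Event(event_id)
    broken = get_settings(user_id=1, event_id=event_id)   # None when only `event` was passed
    fixed = get_settings(user_id=1, event_id=event.id)    # always resolves
    return broken, fixed

print(notify(event=Event(42)))   # (None, {'new_paper': 1})
```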
gh_patches_debug_28782
rasdani/github-patches
git_diff
Pylons__pyramid-1024
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pyramid app frozen with py2exe fails because of Chameleon imports If I freeze a Pyramid application with py2exe and try to run it, it fails during the startup, with the following traceback: ``` Traceback (most recent call last): File "web_server.py", line 2, in <module> File "pyramid\config\__init__.pyc", line 66, in <module> File "pyramid\config\rendering.pyc", line 11, in <module> File "pyramid\chameleon_text.pyc", line 3, in <module> File "chameleon\__init__.pyc", line 1, in <module> File "chameleon\zpt\template.pyc", line 10, in <module> File "chameleon\tales.pyc", line 19, in <module> File "chameleon\compiler.pyc", line 145, in <module> File "chameleon\codegen.pyc", line 93, in template File "inspect.pyc", line 701, in getsource File "inspect.pyc", line 690, in getsourcelines File "inspect.pyc", line 538, in findsource IOError: could not get source code ``` My application doesn't use Chameleon, but the Pyramid modules still import it, which ultimately causes the .exe to fail to run. </issue> <code> [start of pyramid/chameleon_zpt.py] 1 from zope.interface import implementer 2 3 from chameleon.zpt.template import PageTemplateFile 4 5 from pyramid.interfaces import ITemplateRenderer 6 from pyramid.decorator import reify 7 from pyramid import renderers 8 9 def renderer_factory(info): 10 return renderers.template_renderer_factory(info, ZPTTemplateRenderer) 11 12 @implementer(ITemplateRenderer) 13 class ZPTTemplateRenderer(object): 14 def __init__(self, path, lookup, macro=None): 15 self.path = path 16 self.lookup = lookup 17 self.macro = macro 18 19 @reify # avoid looking up reload_templates before manager pushed 20 def template(self): 21 tf = PageTemplateFile( 22 self.path, 23 auto_reload=self.lookup.auto_reload, 24 debug=self.lookup.debug, 25 translate=self.lookup.translate 26 ) 27 if self.macro: 28 # render only the portion of the template included in a 29 # define-macro named the value of self.macro 30 macro_renderer = tf.macros[self.macro].include 31 tf._render = macro_renderer 32 return tf 33 34 def implementation(self): 35 return self.template 36 37 def __call__(self, value, system): 38 try: 39 system.update(value) 40 except (TypeError, ValueError): 41 raise ValueError('renderer was passed non-dictionary as value') 42 result = self.template(**system) 43 return result 44 45 [end of pyramid/chameleon_zpt.py] [start of pyramid/chameleon_text.py] 1 from zope.interface import implementer 2 3 from chameleon.zpt.template import PageTextTemplateFile 4 5 from pyramid.interfaces import ITemplateRenderer 6 7 from pyramid.decorator import reify 8 from pyramid import renderers 9 10 def renderer_factory(info): 11 return renderers.template_renderer_factory(info, TextTemplateRenderer) 12 13 @implementer(ITemplateRenderer) 14 class TextTemplateRenderer(object): 15 def __init__(self, path, lookup, macro=None): 16 self.path = path 17 self.lookup = lookup 18 # text template renderers have no macros, so we ignore the 19 # macro arg 20 21 @reify # avoid looking up reload_templates before manager pushed 22 def template(self): 23 return PageTextTemplateFile(self.path, 24 auto_reload=self.lookup.auto_reload, 25 debug=self.lookup.debug, 26 translate=self.lookup.translate) 27 28 def implementation(self): 29 return self.template 30 31 def __call__(self, value, system): 32 try: 33 system.update(value) 34 except (TypeError, ValueError): 35 raise ValueError('renderer was passed non-dictionary as value') 
36 result = self.template(**system) 37 return result 38 39 [end of pyramid/chameleon_text.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyramid/chameleon_text.py b/pyramid/chameleon_text.py --- a/pyramid/chameleon_text.py +++ b/pyramid/chameleon_text.py @@ -1,7 +1,5 @@ from zope.interface import implementer -from chameleon.zpt.template import PageTextTemplateFile - from pyramid.interfaces import ITemplateRenderer from pyramid.decorator import reify @@ -20,6 +18,7 @@ @reify # avoid looking up reload_templates before manager pushed def template(self): + from chameleon.zpt.template import PageTextTemplateFile return PageTextTemplateFile(self.path, auto_reload=self.lookup.auto_reload, debug=self.lookup.debug, diff --git a/pyramid/chameleon_zpt.py b/pyramid/chameleon_zpt.py --- a/pyramid/chameleon_zpt.py +++ b/pyramid/chameleon_zpt.py @@ -1,7 +1,5 @@ from zope.interface import implementer -from chameleon.zpt.template import PageTemplateFile - from pyramid.interfaces import ITemplateRenderer from pyramid.decorator import reify from pyramid import renderers @@ -18,6 +16,7 @@ @reify # avoid looking up reload_templates before manager pushed def template(self): + from chameleon.zpt.template import PageTemplateFile tf = PageTemplateFile( self.path, auto_reload=self.lookup.auto_reload,
{"golden_diff": "diff --git a/pyramid/chameleon_text.py b/pyramid/chameleon_text.py\n--- a/pyramid/chameleon_text.py\n+++ b/pyramid/chameleon_text.py\n@@ -1,7 +1,5 @@\n from zope.interface import implementer\n \n-from chameleon.zpt.template import PageTextTemplateFile\n-\n from pyramid.interfaces import ITemplateRenderer\n \n from pyramid.decorator import reify\n@@ -20,6 +18,7 @@\n \n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n+ from chameleon.zpt.template import PageTextTemplateFile\n return PageTextTemplateFile(self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\ndiff --git a/pyramid/chameleon_zpt.py b/pyramid/chameleon_zpt.py\n--- a/pyramid/chameleon_zpt.py\n+++ b/pyramid/chameleon_zpt.py\n@@ -1,7 +1,5 @@\n from zope.interface import implementer\n \n-from chameleon.zpt.template import PageTemplateFile\n-\n from pyramid.interfaces import ITemplateRenderer\n from pyramid.decorator import reify\n from pyramid import renderers\n@@ -18,6 +16,7 @@\n \n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n+ from chameleon.zpt.template import PageTemplateFile\n tf = PageTemplateFile(\n self.path,\n auto_reload=self.lookup.auto_reload,\n", "issue": "Pyramid app frozen with py2exe fails because of Chameleon imports\nIf I freeze a Pyramid application with py2exe and try to run it, it fails during the startup, with the following traceback:\n\n```\nTraceback (most recent call last):\n File \"web_server.py\", line 2, in <module>\n File \"pyramid\\config\\__init__.pyc\", line 66, in <module>\n File \"pyramid\\config\\rendering.pyc\", line 11, in <module>\n File \"pyramid\\chameleon_text.pyc\", line 3, in <module>\n File \"chameleon\\__init__.pyc\", line 1, in <module>\n File \"chameleon\\zpt\\template.pyc\", line 10, in <module>\n File \"chameleon\\tales.pyc\", line 19, in <module>\n File \"chameleon\\compiler.pyc\", line 145, in <module>\n File \"chameleon\\codegen.pyc\", line 93, in template\n File \"inspect.pyc\", line 701, in getsource\n File \"inspect.pyc\", line 690, in getsourcelines\n File \"inspect.pyc\", line 538, in findsource\nIOError: could not get source code\n```\n\nMy application doesn't use Chameleon, but the Pyramid modules still import it, which ultimately causes the .exe to fail to run.\n\n", "before_files": [{"content": "from zope.interface import implementer\n\nfrom chameleon.zpt.template import PageTemplateFile\n\nfrom pyramid.interfaces import ITemplateRenderer\nfrom pyramid.decorator import reify\nfrom pyramid import renderers\n\ndef renderer_factory(info):\n return renderers.template_renderer_factory(info, ZPTTemplateRenderer)\n\n@implementer(ITemplateRenderer)\nclass ZPTTemplateRenderer(object):\n def __init__(self, path, lookup, macro=None):\n self.path = path\n self.lookup = lookup\n self.macro = macro\n\n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n tf = PageTemplateFile(\n self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\n translate=self.lookup.translate\n )\n if self.macro:\n # render only the portion of the template included in a\n # define-macro named the value of self.macro\n macro_renderer = tf.macros[self.macro].include\n tf._render = macro_renderer\n return tf\n\n def implementation(self):\n return self.template\n \n def __call__(self, value, system):\n try:\n system.update(value)\n except (TypeError, ValueError):\n raise ValueError('renderer was passed non-dictionary as value')\n result = 
self.template(**system)\n return result\n\n", "path": "pyramid/chameleon_zpt.py"}, {"content": "from zope.interface import implementer\n\nfrom chameleon.zpt.template import PageTextTemplateFile\n\nfrom pyramid.interfaces import ITemplateRenderer\n\nfrom pyramid.decorator import reify\nfrom pyramid import renderers\n\ndef renderer_factory(info):\n return renderers.template_renderer_factory(info, TextTemplateRenderer)\n\n@implementer(ITemplateRenderer)\nclass TextTemplateRenderer(object):\n def __init__(self, path, lookup, macro=None):\n self.path = path\n self.lookup = lookup\n # text template renderers have no macros, so we ignore the\n # macro arg\n\n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n return PageTextTemplateFile(self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\n translate=self.lookup.translate)\n\n def implementation(self):\n return self.template\n \n def __call__(self, value, system):\n try:\n system.update(value)\n except (TypeError, ValueError):\n raise ValueError('renderer was passed non-dictionary as value')\n result = self.template(**system)\n return result\n\n", "path": "pyramid/chameleon_text.py"}]}
1,550
309
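The pyramid fix above is the usual "defer an optional import" pattern: moving the Chameleon imports inside the template property means merely importing pyramid.chameleon_zpt no longer triggers Chameleon's inspect.getsource call, which is what fails in the frozen py2exe traceback. A generic sketch of the same pattern, using string.Template as a stand-in for the heavy optional dependency:

```python
import os
import tempfile

class LazyTemplateRenderer:
    """Touches the templating library only when a template is actually rendered."""

    def __init__(self, path):
        self.path = path
        self._template = None

    @property
    def template(self):
        if self._template is None:
            from string import Template   # deferred: import cost/failures hit only real users
            with open(self.path, encoding="utf-8") as fh:
                self._template = Template(fh.read())
        return self._template

    def __call__(self, **values):
        return self.template.substitute(**values)

path = os.path.join(tempfile.gettempdir(), "greeting.tmpl")
with open(path, "w", encoding="utf-8") as fh:
    fh.write("Hello, $name!")
print(LazyTemplateRenderer(path)(name="Pyramid"))   # Hello, Pyramid!
```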
gh_patches_debug_2156
rasdani/github-patches
git_diff
spacetelescope__jwql-569
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Write tests for bokeh templating software With the merge of #459, bokeh templating will be implemented for `jwql`. We should address the test coverage for this software. </issue> <code> [start of jwql/bokeh_templating/example/main.py] 1 """ 2 This is a minimal example demonstrating how to create a Bokeh app using 3 the ``bokeh-templating`` package and the associated YAML template files. 4 5 Author 6 ------- 7 8 - Graham Kanarek 9 10 Dependencies 11 ------------ 12 13 The user must have PyYAML, Bokeh, and the ``bokeh-templating`` 14 packages installed. 15 """ 16 17 import os 18 import numpy as np 19 20 from jwql.bokeh_templating import BokehTemplate 21 22 file_dir = os.path.dirname(os.path.realpath(__file__)) 23 24 25 class TestBokehApp(BokehTemplate): 26 """This is a minimal ``BokehTemplate`` app.""" 27 28 def pre_init(self): 29 """Before creating the Bokeh interface (by parsing the interface 30 file), we must initialize our ``a`` and ``b`` variables, and set 31 the path to the interface file. 32 """ 33 34 self.a, self.b = 4, 2 35 36 self.format_string = None 37 self.interface_file = os.path.join(file_dir, "example_interface.yaml") 38 39 # No post-initialization tasks are required. 40 post_init = None 41 42 @property 43 def x(self): 44 """The x-value of the Lissajous curves.""" 45 return 4. * np.sin(self.a * np.linspace(0, 2 * np.pi, 500)) 46 47 @property 48 def y(self): 49 """The y-value of the Lissajous curves.""" 50 return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500)) 51 52 def controller(self, attr, old, new): 53 """This is the controller function which is used to update the 54 curves when the sliders are adjusted. Note the use of the 55 ``self.refs`` dictionary for accessing the Bokeh object 56 attributes.""" 57 self.a = self.refs["a_slider"].value 58 self.b = self.refs["b_slider"].value 59 60 self.refs["figure_source"].data = {'x': self.x, 'y': self.y} 61 62 63 TestBokehApp() 64 [end of jwql/bokeh_templating/example/main.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/jwql/bokeh_templating/example/main.py b/jwql/bokeh_templating/example/main.py --- a/jwql/bokeh_templating/example/main.py +++ b/jwql/bokeh_templating/example/main.py @@ -24,6 +24,8 @@ class TestBokehApp(BokehTemplate): """This is a minimal ``BokehTemplate`` app.""" + + _embed = True def pre_init(self): """Before creating the Bokeh interface (by parsing the interface
{"golden_diff": "diff --git a/jwql/bokeh_templating/example/main.py b/jwql/bokeh_templating/example/main.py\n--- a/jwql/bokeh_templating/example/main.py\n+++ b/jwql/bokeh_templating/example/main.py\n@@ -24,6 +24,8 @@\n \n class TestBokehApp(BokehTemplate):\n \"\"\"This is a minimal ``BokehTemplate`` app.\"\"\"\n+ \n+ _embed = True\n \n def pre_init(self):\n \"\"\"Before creating the Bokeh interface (by parsing the interface\n", "issue": "Write tests for bokeh templating software\nWith the merge of #459, bokeh templating will be implemented for `jwql`. We should address the test coverage for this software. \n", "before_files": [{"content": "\"\"\"\nThis is a minimal example demonstrating how to create a Bokeh app using\nthe ``bokeh-templating`` package and the associated YAML template files.\n\nAuthor\n-------\n\n - Graham Kanarek\n\nDependencies\n------------\n\n The user must have PyYAML, Bokeh, and the ``bokeh-templating``\n packages installed.\n\"\"\"\n\nimport os\nimport numpy as np\n\nfrom jwql.bokeh_templating import BokehTemplate\n\nfile_dir = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestBokehApp(BokehTemplate):\n \"\"\"This is a minimal ``BokehTemplate`` app.\"\"\"\n\n def pre_init(self):\n \"\"\"Before creating the Bokeh interface (by parsing the interface\n file), we must initialize our ``a`` and ``b`` variables, and set\n the path to the interface file.\n \"\"\"\n\n self.a, self.b = 4, 2\n\n self.format_string = None\n self.interface_file = os.path.join(file_dir, \"example_interface.yaml\")\n\n # No post-initialization tasks are required.\n post_init = None\n\n @property\n def x(self):\n \"\"\"The x-value of the Lissajous curves.\"\"\"\n return 4. * np.sin(self.a * np.linspace(0, 2 * np.pi, 500))\n\n @property\n def y(self):\n \"\"\"The y-value of the Lissajous curves.\"\"\"\n return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500))\n\n def controller(self, attr, old, new):\n \"\"\"This is the controller function which is used to update the\n curves when the sliders are adjusted. Note the use of the\n ``self.refs`` dictionary for accessing the Bokeh object\n attributes.\"\"\"\n self.a = self.refs[\"a_slider\"].value\n self.b = self.refs[\"b_slider\"].value\n\n self.refs[\"figure_source\"].data = {'x': self.x, 'y': self.y}\n\n\nTestBokehApp()\n", "path": "jwql/bokeh_templating/example/main.py"}]}
1,164
126
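The jwql row's recorded fix only flips an _embed flag on the example app, but the underlying request is test coverage for the bokeh templating example. A hedged starting point that exercises the example's Lissajous math without a Bokeh server; the helper below re-derives the curve rather than importing the app, so it is an illustration, not jwql's actual test suite:

```python
import numpy as np

def lissajous(a, b, points=500):
    """Same curve the example app exposes through its x/y properties."""
    t = np.linspace(0, 2 * np.pi, points)
    return 4.0 * np.sin(a * t), 3.0 * np.sin(b * t)

def test_lissajous_amplitude_and_length():
    x, y = lissajous(4, 2)
    assert len(x) == len(y) == 500
    assert np.max(np.abs(x)) <= 4.0
    assert np.max(np.abs(y)) <= 3.0

test_lissajous_amplitude_and_length()   # also discoverable by pytest as-is
```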
gh_patches_debug_347
rasdani/github-patches
git_diff
kivy__python-for-android-2436
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "diff" files are ignored during "pip install ." in `setup.py` the "diff" is not listed: https://github.com/kivy/python-for-android/blob/develop/setup.py ```python package_data = {'': ['*.tmpl', '*.patch', ], } ``` and therefore this `diff` patch: https://github.com/kivy/python-for-android/blob/develop/pythonforandroid/recipes/python3/patches/reproducible-buildinfo.diff is not installed during `pip` invocation: ```sh cd /tmp git clone --depth 1 https://github.com/kivy/python-for-android.git cd python-for-android pip install . ``` </issue> <code> [start of setup.py] 1 2 import glob 3 from io import open # for open(..,encoding=...) parameter in python 2 4 from os import walk 5 from os.path import join, dirname, sep 6 import re 7 from setuptools import setup, find_packages 8 9 # NOTE: All package data should also be set in MANIFEST.in 10 11 packages = find_packages() 12 13 package_data = {'': ['*.tmpl', 14 '*.patch', ], } 15 16 data_files = [] 17 18 19 # must be a single statement since buildozer is currently parsing it, refs: 20 # https://github.com/kivy/buildozer/issues/722 21 install_reqs = [ 22 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six', 23 'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"', 24 'pep517<0.7.0', 'toml', 25 ] 26 # (pep517 and toml are used by pythonpackage.py) 27 28 29 # By specifying every file manually, package_data will be able to 30 # include them in binary distributions. Note that we have to add 31 # everything as a 'pythonforandroid' rule, using '' apparently doesn't 32 # work. 33 def recursively_include(results, directory, patterns): 34 for root, subfolders, files in walk(directory): 35 for fn in files: 36 if not any(glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns): 37 continue 38 filename = join(root, fn) 39 directory = 'pythonforandroid' 40 if directory not in results: 41 results[directory] = [] 42 results[directory].append(join(*filename.split(sep)[1:])) 43 44 45 recursively_include(package_data, 'pythonforandroid/recipes', 46 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', 47 '*.mk', '*.jam', ]) 48 recursively_include(package_data, 'pythonforandroid/bootstraps', 49 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png', 50 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', 51 '*.gradle', '.gitkeep', 'gradlew*', '*.jar', "*.patch", ]) 52 recursively_include(package_data, 'pythonforandroid/bootstraps', 53 ['sdl-config', ]) 54 recursively_include(package_data, 'pythonforandroid/bootstraps/webview', 55 ['*.html', ]) 56 recursively_include(package_data, 'pythonforandroid', 57 ['liblink', 'biglink', 'liblink.sh']) 58 59 with open(join(dirname(__file__), 'README.md'), 60 encoding="utf-8", 61 errors="replace", 62 ) as fileh: 63 long_description = fileh.read() 64 65 init_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py') 66 version = None 67 try: 68 with open(init_filen, 69 encoding="utf-8", 70 errors="replace" 71 ) as fileh: 72 lines = fileh.readlines() 73 except IOError: 74 pass 75 else: 76 for line in lines: 77 line = line.strip() 78 if line.startswith('__version__ = '): 79 matches = re.findall(r'["\'].+["\']', line) 80 if matches: 81 version = matches[0].strip("'").strip('"') 82 break 83 if version is None: 84 raise Exception('Error: version could not be loaded from {}'.format(init_filen)) 85 86 setup(name='python-for-android', 87 version=version, 88 description='Android APK packager for Python scripts 
and apps', 89 long_description=long_description, 90 long_description_content_type='text/markdown', 91 python_requires=">=3.6.0", 92 author='The Kivy team', 93 author_email='[email protected]', 94 url='https://github.com/kivy/python-for-android', 95 license='MIT', 96 install_requires=install_reqs, 97 entry_points={ 98 'console_scripts': [ 99 'python-for-android = pythonforandroid.entrypoints:main', 100 'p4a = pythonforandroid.entrypoints:main', 101 ], 102 'distutils.commands': [ 103 'apk = pythonforandroid.bdistapk:BdistAPK', 104 'aar = pythonforandroid.bdistapk:BdistAAR', 105 ], 106 }, 107 classifiers=[ 108 'Development Status :: 5 - Production/Stable', 109 'Intended Audience :: Developers', 110 'License :: OSI Approved :: MIT License', 111 'Operating System :: Microsoft :: Windows', 112 'Operating System :: OS Independent', 113 'Operating System :: POSIX :: Linux', 114 'Operating System :: MacOS :: MacOS X', 115 'Operating System :: Android', 116 'Programming Language :: C', 117 'Programming Language :: Python :: 3', 118 'Programming Language :: Python :: 3.6', 119 'Programming Language :: Python :: 3.7', 120 'Programming Language :: Python :: 3.8', 121 'Topic :: Software Development', 122 'Topic :: Utilities', 123 ], 124 packages=packages, 125 package_data=package_data, 126 ) 127 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -11,7 +11,8 @@ packages = find_packages() package_data = {'': ['*.tmpl', - '*.patch', ], } + '*.patch', + '*.diff', ], } data_files = []
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,8 @@\n packages = find_packages()\n \n package_data = {'': ['*.tmpl',\n- '*.patch', ], }\n+ '*.patch',\n+ '*.diff', ], }\n \n data_files = []\n", "issue": "\"diff\" files are ignored during \"pip install .\"\nin `setup.py` the \"diff\" is not listed: \r\nhttps://github.com/kivy/python-for-android/blob/develop/setup.py\r\n```python\r\npackage_data = {'': ['*.tmpl',\r\n '*.patch', ], }\r\n```\r\n\r\nand therefore this `diff` patch:\r\nhttps://github.com/kivy/python-for-android/blob/develop/pythonforandroid/recipes/python3/patches/reproducible-buildinfo.diff\r\n\r\nis not installed during `pip` invocation:\r\n```sh\r\ncd /tmp\r\ngit clone --depth 1 https://github.com/kivy/python-for-android.git\r\ncd python-for-android\r\npip install .\r\n```\r\n\n", "before_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517<0.7.0', 'toml',\n]\n# (pep517 and toml are used by pythonpackage.py)\n\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any(glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from 
{}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires=\">=3.6.0\",\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n 'aar = pythonforandroid.bdistapk:BdistAAR',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]}
2,015
72
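The python-for-android fix above just adds '*.diff' to the package_data globs; whether the globs actually cover every shipped patch is easy to check from a source checkout before cutting a release. A small sketch (the recipes path assumes you are sitting in a python-for-android clone, otherwise the count is simply zero):

```python
import glob
import os

recipes = "pythonforandroid/recipes"          # illustrative path inside a p4a checkout
patterns = ["*.patch", "*.diff"]

matched = []
for pattern in patterns:
    matched += glob.glob(os.path.join(recipes, "**", pattern), recursive=True)

# Before the fix, files like python3/patches/reproducible-buildinfo.diff matched
# nothing in package_data and were silently dropped from `pip install .`.
print(f"{len(matched)} patch/diff files found under {recipes}")
```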
gh_patches_debug_1073
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-98
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> EC2 resource detector hangs for a long time outside of an EC2 instance **Describe your environment** Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main. The environment I initially saw this in was a container running in Docker compose on an AWS EC2 instance but I've been able to reproduce it on my laptop as well. I think it will show up in anything not directly running in AWS. **Steps to reproduce** Describe exactly how to reproduce the error. Include a code sample if applicable. The following code reproduced the issue on my laptop: ```python from opentelemetry.sdk.extension.aws.resource.ec2 import AwsEc2ResourceDetector from opentelemetry.sdk.resources import get_aggregated_resources resource = get_aggregated_resources( detectors=[AwsEc2ResourceDetector()] ) ``` **What is the expected behavior?** It should complete quickly (this is the behavior I see running on an EC2 instance). **What is the actual behavior?** What did you see instead? On my laptop, it will hand ~indefinitely. Note: one solution is just to remove the resource detector but we'd like to be able to include it and just have it fail silently, which is the behavior we've seen in other resource detectors. **Additional context** I think the problem is here: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/80969a06da77d1e616124de178d12a1ebe3ffe7f/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py#L37 It looks like the request is using a 1000 _second_ timeout which I suspect is intended to be a 1000 _millisecond_ timeout. At least with the server program I've been working on that will block the startup of the program until the request completes. You can verify by running: ``` curl http://169.254.169.254/latest/api/token ``` Which is one of the requests that the resource detector makes -- it should hang indefinitely as well. </issue> <code> [start of instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 __version__ = "0.15.dev0" 16 [end of instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "0.15.dev0" +__version__ = "0.15b0"
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py\n--- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py\n+++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py\n@@ -12,4 +12,4 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-__version__ = \"0.15.dev0\"\n+__version__ = \"0.15b0\"\n", "issue": "EC2 resource detector hangs for a long time outside of an EC2 instance\n**Describe your environment** Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.\r\n\r\nThe environment I initially saw this in was a container running in Docker compose on an AWS EC2 instance but I've been able to reproduce it on my laptop as well. I think it will show up in anything not directly running in AWS.\r\n\r\n**Steps to reproduce**\r\nDescribe exactly how to reproduce the error. Include a code sample if applicable.\r\n\r\nThe following code reproduced the issue on my laptop:\r\n\r\n```python\r\nfrom opentelemetry.sdk.extension.aws.resource.ec2 import AwsEc2ResourceDetector\r\nfrom opentelemetry.sdk.resources import get_aggregated_resources\r\n\r\nresource = get_aggregated_resources(\r\n detectors=[AwsEc2ResourceDetector()]\r\n)\r\n```\r\n\r\n**What is the expected behavior?**\r\n\r\nIt should complete quickly (this is the behavior I see running on an EC2 instance).\r\n\r\n**What is the actual behavior?**\r\n\r\nWhat did you see instead?\r\n\r\nOn my laptop, it will hand ~indefinitely.\r\n\r\nNote: one solution is just to remove the resource detector but we'd like to be able to include it and just have it fail silently, which is the behavior we've seen in other resource detectors.\r\n\r\n**Additional context**\r\n\r\nI think the problem is here: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/80969a06da77d1e616124de178d12a1ebe3ffe7f/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py#L37\r\n\r\nIt looks like the request is using a 1000 _second_ timeout which I suspect is intended to be a 1000 _millisecond_ timeout. 
At least with the server program I've been working on that will block the startup of the program until the request completes.\r\n\r\nYou can verify by running:\r\n\r\n```\r\ncurl http://169.254.169.254/latest/api/token\r\n```\r\n\r\nWhich is one of the requests that the resource detector makes -- it should hang indefinitely as well.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__version__ = \"0.15.dev0\"\n", "path": "instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py"}]}
1,256
170
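The issue in this row pins the hang on a metadata request made with a 1000 second timeout where 1000 ms was intended: off EC2 the 169.254.169.254 endpoint never answers, so startup blocks for the full value. A sketch of a bounded, fail-silent lookup (the wrapper function is hypothetical; the URL and header are the standard IMDSv2 token request, which is a PUT):

```python
import requests

IMDS_TOKEN_URL = "http://169.254.169.254/latest/api/token"

def fetch_imds_token(timeout_seconds=1.0):
    """Return the IMDSv2 token, or None when we are clearly not on EC2."""
    try:
        resp = requests.put(
            IMDS_TOKEN_URL,
            headers={"X-aws-ec2-metadata-token-ttl-seconds": "60"},
            timeout=timeout_seconds,   # 1000 here means roughly 17 minutes of blocking off-EC2
        )
        resp.raise_for_status()
        return resp.text
    except requests.RequestException:
        return None   # degrade silently, like the other resource detectors
```

Note the golden diff recorded for this row is only a version-string bump; the sketch illustrates the behaviour described in the issue rather than that diff.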
gh_patches_debug_36022
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-6357
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> yettel_bg is broken The spider does not return anything. It seems like they've moved the page for getting their store data. I wasn't able to find the new address or how the data reaches their store locator page. Store locator: https://www.yettel.bg/faq/digital-customer-service/store-locator Spider: https://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/yettel_bg.py </issue> <code> [start of locations/spiders/yettel_bg.py] 1 from scrapy import Selector, Spider 2 3 from locations.items import Feature 4 5 6 class YettelBGSpider(Spider): 7 name = "yettel_bg" 8 item_attributes = { 9 "brand": "Yettel", 10 "brand_wikidata": "Q14915070", 11 "country": "BG", 12 } 13 start_urls = ["https://www.yettel.bg/store-locator/json"] 14 15 def parse(self, response): 16 for store in response.json()["features"]: 17 item = Feature() 18 19 item["lon"], item["lat"] = store["geometry"]["coordinates"] 20 21 item["ref"] = store["properties"]["title"] 22 23 address_block = Selector(text=store["properties"]["gsl_addressfield"]) 24 25 item["street_address"] = address_block.xpath('//div[@class="thoroughfare"]/text()').get() 26 item["postcode"] = address_block.xpath('//span[@class="postal-code"]/text()').get() 27 item["city"] = address_block.xpath('//span[@class="locality"]/text()').get() 28 29 yield item 30 [end of locations/spiders/yettel_bg.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/yettel_bg.py b/locations/spiders/yettel_bg.py --- a/locations/spiders/yettel_bg.py +++ b/locations/spiders/yettel_bg.py @@ -1,5 +1,9 @@ -from scrapy import Selector, Spider +import io +from openpyxl import load_workbook +from scrapy import Spider + +from locations.hours import OpeningHours, day_range from locations.items import Feature @@ -10,20 +14,50 @@ "brand_wikidata": "Q14915070", "country": "BG", } - start_urls = ["https://www.yettel.bg/store-locator/json"] + start_urls = ["https://www.yettel.bg/faq/digital-customer-service/store-locator"] + no_refs = True + custom_settings = {"ROBOTSTXT_OBEY": False} def parse(self, response): - for store in response.json()["features"]: - item = Feature() - - item["lon"], item["lat"] = store["geometry"]["coordinates"] - - item["ref"] = store["properties"]["title"] - - address_block = Selector(text=store["properties"]["gsl_addressfield"]) - - item["street_address"] = address_block.xpath('//div[@class="thoroughfare"]/text()').get() - item["postcode"] = address_block.xpath('//span[@class="postal-code"]/text()').get() - item["city"] = address_block.xpath('//span[@class="locality"]/text()').get() - - yield item + yield response.follow( + url=response.xpath('//input[@id="hdnExcelFile"]/@value').get(), callback=self.parse_spreadsheet + ) + + def parse_spreadsheet(self, response): + if "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" in response.headers.get( + "Content-Type" + ).decode("utf-8"): + excel_file = response.body + + excel_data = io.BytesIO(excel_file) + workbook = load_workbook(excel_data, read_only=True) + + sheet = workbook.active + + data = [] + for row in sheet.iter_rows(values_only=True): + data.append(row) + + headers = data[0] + json_data = [] + for row in data[1:]: + json_data.append({headers[i]: cell for i, cell in enumerate(row)}) + + for store in json_data: + item = Feature() + + item["lat"] = store["latitude"] + item["lon"] = store["longitude"] + + item["street_address"] = store["address_loc"] + item["city"] = store["city_loc"] + + item["opening_hours"] = OpeningHours() + item["opening_hours"].add_days_range( + day_range("Mo", "Fr"), *store["working_time_weekdays"].replace(" ", "").split("-") + ) + if store["is_closed_on_saturday"] == "No": + item["opening_hours"].add_range("Sa", *store["working_time_saturday"].replace(" ", "").split("-")) + if store["is_closed_on_sunday"] == "No": + item["opening_hours"].add_range("Su", *store["working_time_sunday"].replace(" ", "").split("-")) + yield item
{"golden_diff": "diff --git a/locations/spiders/yettel_bg.py b/locations/spiders/yettel_bg.py\n--- a/locations/spiders/yettel_bg.py\n+++ b/locations/spiders/yettel_bg.py\n@@ -1,5 +1,9 @@\n-from scrapy import Selector, Spider\n+import io\n \n+from openpyxl import load_workbook\n+from scrapy import Spider\n+\n+from locations.hours import OpeningHours, day_range\n from locations.items import Feature\n \n \n@@ -10,20 +14,50 @@\n \"brand_wikidata\": \"Q14915070\",\n \"country\": \"BG\",\n }\n- start_urls = [\"https://www.yettel.bg/store-locator/json\"]\n+ start_urls = [\"https://www.yettel.bg/faq/digital-customer-service/store-locator\"]\n+ no_refs = True\n+ custom_settings = {\"ROBOTSTXT_OBEY\": False}\n \n def parse(self, response):\n- for store in response.json()[\"features\"]:\n- item = Feature()\n-\n- item[\"lon\"], item[\"lat\"] = store[\"geometry\"][\"coordinates\"]\n-\n- item[\"ref\"] = store[\"properties\"][\"title\"]\n-\n- address_block = Selector(text=store[\"properties\"][\"gsl_addressfield\"])\n-\n- item[\"street_address\"] = address_block.xpath('//div[@class=\"thoroughfare\"]/text()').get()\n- item[\"postcode\"] = address_block.xpath('//span[@class=\"postal-code\"]/text()').get()\n- item[\"city\"] = address_block.xpath('//span[@class=\"locality\"]/text()').get()\n-\n- yield item\n+ yield response.follow(\n+ url=response.xpath('//input[@id=\"hdnExcelFile\"]/@value').get(), callback=self.parse_spreadsheet\n+ )\n+\n+ def parse_spreadsheet(self, response):\n+ if \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\" in response.headers.get(\n+ \"Content-Type\"\n+ ).decode(\"utf-8\"):\n+ excel_file = response.body\n+\n+ excel_data = io.BytesIO(excel_file)\n+ workbook = load_workbook(excel_data, read_only=True)\n+\n+ sheet = workbook.active\n+\n+ data = []\n+ for row in sheet.iter_rows(values_only=True):\n+ data.append(row)\n+\n+ headers = data[0]\n+ json_data = []\n+ for row in data[1:]:\n+ json_data.append({headers[i]: cell for i, cell in enumerate(row)})\n+\n+ for store in json_data:\n+ item = Feature()\n+\n+ item[\"lat\"] = store[\"latitude\"]\n+ item[\"lon\"] = store[\"longitude\"]\n+\n+ item[\"street_address\"] = store[\"address_loc\"]\n+ item[\"city\"] = store[\"city_loc\"]\n+\n+ item[\"opening_hours\"] = OpeningHours()\n+ item[\"opening_hours\"].add_days_range(\n+ day_range(\"Mo\", \"Fr\"), *store[\"working_time_weekdays\"].replace(\" \", \"\").split(\"-\")\n+ )\n+ if store[\"is_closed_on_saturday\"] == \"No\":\n+ item[\"opening_hours\"].add_range(\"Sa\", *store[\"working_time_saturday\"].replace(\" \", \"\").split(\"-\"))\n+ if store[\"is_closed_on_sunday\"] == \"No\":\n+ item[\"opening_hours\"].add_range(\"Su\", *store[\"working_time_sunday\"].replace(\" \", \"\").split(\"-\"))\n+ yield item\n", "issue": "yettel_bg is broken\nThe spider does not return anything. It seems like they've moved the page for getting their store data. 
I wasn't able to find the new address or how the data reaches their store locator page.\r\n\r\nStore locator: https://www.yettel.bg/faq/digital-customer-service/store-locator\r\n\r\nSpider: https://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/yettel_bg.py\n", "before_files": [{"content": "from scrapy import Selector, Spider\n\nfrom locations.items import Feature\n\n\nclass YettelBGSpider(Spider):\n name = \"yettel_bg\"\n item_attributes = {\n \"brand\": \"Yettel\",\n \"brand_wikidata\": \"Q14915070\",\n \"country\": \"BG\",\n }\n start_urls = [\"https://www.yettel.bg/store-locator/json\"]\n\n def parse(self, response):\n for store in response.json()[\"features\"]:\n item = Feature()\n\n item[\"lon\"], item[\"lat\"] = store[\"geometry\"][\"coordinates\"]\n\n item[\"ref\"] = store[\"properties\"][\"title\"]\n\n address_block = Selector(text=store[\"properties\"][\"gsl_addressfield\"])\n\n item[\"street_address\"] = address_block.xpath('//div[@class=\"thoroughfare\"]/text()').get()\n item[\"postcode\"] = address_block.xpath('//span[@class=\"postal-code\"]/text()').get()\n item[\"city\"] = address_block.xpath('//span[@class=\"locality\"]/text()').get()\n\n yield item\n", "path": "locations/spiders/yettel_bg.py"}]}
912
757
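Editor's note on the yettel_bg entry above: the accepted fix swaps the retired JSON endpoint for an Excel file linked from the store-locator page, and the heart of that change is parsing an in-memory .xlsx into header-keyed dicts with openpyxl. A minimal standalone sketch of that one step (function and variable names here are illustrative, not part of the spider):

```python
import io

from openpyxl import load_workbook  # same third-party dependency the patched spider relies on


def xlsx_rows_as_dicts(payload):
    """Read the first worksheet of an in-memory .xlsx payload (bytes) into dicts keyed by the header row."""
    workbook = load_workbook(io.BytesIO(payload), read_only=True)
    sheet = workbook.active
    rows = sheet.iter_rows(values_only=True)   # yields plain tuples of cell values
    headers = next(rows)                       # first row carries the column names
    return [dict(zip(headers, row)) for row in rows]
```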
gh_patches_debug_2714
rasdani/github-patches
git_diff
sanic-org__sanic-1397
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Logger not work. **Describe the bug** Logger did not work at current master commit (https://github.com/huge-success/sanic/commit/7d79a86d4dc48de11cd34e8ba12e41f3a9f9ff18). **Code snippet** ```python from sanic import Sanic from sanic.log import logger from sanic.response import text app = Sanic() @app.listener('before_server_start') async def setup(app, loop): logger.info('INFO') @app.get('/') async def test(request): return text('hello world') if __name__ == '__main__': app.run() ``` There is no any log/output now. **Expected behavior** At `0.8.3` release, it will logging/output some messages like: ``` [2018-11-05 17:34:47 +0800] [12112] [INFO] Goin' Fast @ http://127.0.0.1:8000 [2018-11-05 17:34:47 +0800] [12112] [INFO] INFO [2018-11-05 17:34:47 +0800] [12112] [INFO] Starting worker [12112] ``` **Environment (please complete the following information):** - OS: Ubuntu 18.04 - Version: https://github.com/huge-success/sanic/commit/7d79a86d4dc48de11cd34e8ba12e41f3a9f9ff18 **Additional context** It seems that `getLogger()` does not get the correct logger at [line 56](https://github.com/huge-success/sanic/blob/master/sanic/log.py#L56) in `log.py`. The logger is trying to get a logger named `sanic.root`, but it does not exist. Rename the logger `root` at [line 9](https://github.com/huge-success/sanic/blob/master/sanic/log.py#L9) should fix this bug. </issue> <code> [start of sanic/log.py] 1 import logging 2 import sys 3 4 5 LOGGING_CONFIG_DEFAULTS = dict( 6 version=1, 7 disable_existing_loggers=False, 8 loggers={ 9 "root": {"level": "INFO", "handlers": ["console"]}, 10 "sanic.error": { 11 "level": "INFO", 12 "handlers": ["error_console"], 13 "propagate": True, 14 "qualname": "sanic.error", 15 }, 16 "sanic.access": { 17 "level": "INFO", 18 "handlers": ["access_console"], 19 "propagate": True, 20 "qualname": "sanic.access", 21 }, 22 }, 23 handlers={ 24 "console": { 25 "class": "logging.StreamHandler", 26 "formatter": "generic", 27 "stream": sys.stdout, 28 }, 29 "error_console": { 30 "class": "logging.StreamHandler", 31 "formatter": "generic", 32 "stream": sys.stderr, 33 }, 34 "access_console": { 35 "class": "logging.StreamHandler", 36 "formatter": "access", 37 "stream": sys.stdout, 38 }, 39 }, 40 formatters={ 41 "generic": { 42 "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s", 43 "datefmt": "[%Y-%m-%d %H:%M:%S %z]", 44 "class": "logging.Formatter", 45 }, 46 "access": { 47 "format": "%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: " 48 + "%(request)s %(message)s %(status)d %(byte)d", 49 "datefmt": "[%Y-%m-%d %H:%M:%S %z]", 50 "class": "logging.Formatter", 51 }, 52 }, 53 ) 54 55 56 logger = logging.getLogger("sanic.root") 57 error_logger = logging.getLogger("sanic.error") 58 access_logger = logging.getLogger("sanic.access") 59 [end of sanic/log.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sanic/log.py b/sanic/log.py --- a/sanic/log.py +++ b/sanic/log.py @@ -6,7 +6,7 @@ version=1, disable_existing_loggers=False, loggers={ - "root": {"level": "INFO", "handlers": ["console"]}, + "sanic.root": {"level": "INFO", "handlers": ["console"]}, "sanic.error": { "level": "INFO", "handlers": ["error_console"],
{"golden_diff": "diff --git a/sanic/log.py b/sanic/log.py\n--- a/sanic/log.py\n+++ b/sanic/log.py\n@@ -6,7 +6,7 @@\n version=1,\n disable_existing_loggers=False,\n loggers={\n- \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n+ \"sanic.root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"sanic.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n", "issue": "Logger not work.\n**Describe the bug**\r\nLogger did not work at current master commit (https://github.com/huge-success/sanic/commit/7d79a86d4dc48de11cd34e8ba12e41f3a9f9ff18).\r\n\r\n\r\n**Code snippet**\r\n```python\r\nfrom sanic import Sanic\r\nfrom sanic.log import logger\r\nfrom sanic.response import text\r\n\r\n\r\napp = Sanic()\r\n\r\[email protected]('before_server_start')\r\nasync def setup(app, loop):\r\n logger.info('INFO')\r\n\r\n\r\[email protected]('/')\r\nasync def test(request):\r\n return text('hello world')\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n```\r\nThere is no any log/output now.\r\n\r\n\r\n\r\n**Expected behavior**\r\nAt `0.8.3` release, it will logging/output some messages like:\r\n```\r\n[2018-11-05 17:34:47 +0800] [12112] [INFO] Goin' Fast @ http://127.0.0.1:8000\r\n[2018-11-05 17:34:47 +0800] [12112] [INFO] INFO\r\n[2018-11-05 17:34:47 +0800] [12112] [INFO] Starting worker [12112]\r\n```\r\n\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Ubuntu 18.04\r\n - Version: https://github.com/huge-success/sanic/commit/7d79a86d4dc48de11cd34e8ba12e41f3a9f9ff18\r\n\r\n\r\n**Additional context**\r\nIt seems that `getLogger()` does not get the correct logger at [line 56](https://github.com/huge-success/sanic/blob/master/sanic/log.py#L56) in `log.py`. The logger is trying to get a logger named `sanic.root`, but it does not exist. Rename the logger `root` at [line 9](https://github.com/huge-success/sanic/blob/master/sanic/log.py#L9) should fix this bug.\r\n\n", "before_files": [{"content": "import logging\nimport sys\n\n\nLOGGING_CONFIG_DEFAULTS = dict(\n version=1,\n disable_existing_loggers=False,\n loggers={\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"sanic.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.error\",\n },\n \"sanic.access\": {\n \"level\": \"INFO\",\n \"handlers\": [\"access_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.access\",\n },\n },\n handlers={\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stdout,\n },\n \"error_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stderr,\n },\n \"access_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"access\",\n \"stream\": sys.stdout,\n },\n },\n formatters={\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n \"access\": {\n \"format\": \"%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: \"\n + \"%(request)s %(message)s %(status)d %(byte)d\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n },\n)\n\n\nlogger = logging.getLogger(\"sanic.root\")\nerror_logger = logging.getLogger(\"sanic.error\")\naccess_logger = logging.getLogger(\"sanic.access\")\n", "path": "sanic/log.py"}]}
1,549
115
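Editor's note on the sanic entry above: the bug is purely a naming mismatch — the dictConfig key said `root` while the code asked for `getLogger("sanic.root")`, and handlers only attach to the logger whose exact name appears under `loggers`. A small self-contained illustration of that behaviour (the config below is a toy, not Sanic's real LOGGING_CONFIG_DEFAULTS):

```python
import logging
import logging.config
import sys

logging.config.dictConfig({
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {"plain": {"format": "%(levelname)s %(name)s %(message)s"}},
    "handlers": {
        "console": {"class": "logging.StreamHandler", "formatter": "plain", "stream": sys.stdout},
    },
    # Handlers bind to this exact dotted name only.
    "loggers": {"sanic.root": {"level": "INFO", "handlers": ["console"]}},
})

logging.getLogger("sanic.root").info("printed: the name matches the configured logger")
logging.getLogger("root").info("silent: this logger was never configured, so no handler is attached")
```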
gh_patches_debug_1097
rasdani/github-patches
git_diff
openfun__richie-290
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Person plugin form list every pages, not only Person pages ## Bug Report **Expected behavior/code** Select box in PersonPlugin form should list only extended page with Person model. **Actual Behavior** Currently the select box is listing every CMS pages. **Steps to Reproduce** 1. Edit a page; 2. Try to add a Person plugin into a placeholder which allow it; 3. Click to open the select box from opened form for added Person plugin. **Environment** - Richie version: 0.1.0 (from my own branch synchronized from master 200c8a3) - Platform: Ubuntu 18.04 LTS **Possible Solution** Adding a filter inside plugin form machinery to retain only the extend page with Person. </issue> <code> [start of src/richie/apps/persons/models.py] 1 """ 2 Declare and configure the model for the person application 3 """ 4 from django.db import models 5 from django.utils.translation import ugettext_lazy as _ 6 7 from cms.api import Page 8 from cms.extensions import PageExtension 9 from cms.models.pluginmodel import CMSPlugin 10 from parler.models import TranslatableModel, TranslatedFields 11 12 13 class PersonTitle(TranslatableModel): 14 """ 15 PersonTitle define i18ned list of people titles and there abbreviations 16 Instances of this models should only be created by CMS administrators 17 """ 18 19 translations = TranslatedFields( 20 title=models.CharField(_("Title"), max_length=200), 21 abbreviation=models.CharField(_("Title abbreviation"), max_length=10), 22 ) 23 24 class Meta: 25 verbose_name = _("person title") 26 27 def __str__(self): 28 """Human representation of a person title""" 29 return "{model}: {title} ({abbreviation})".format( 30 model=self._meta.verbose_name.title(), 31 title=self.title, 32 abbreviation=self.abbreviation, 33 ) 34 35 36 class Person(PageExtension): 37 """ 38 The person page extension represents and records people information. 39 It could be a course or news article author. 40 41 This model should be used to record structured data about the person whereas the 42 associated page object is where we record the less structured information to display on the 43 page to present the person. 
44 """ 45 46 first_name = models.CharField(max_length=200, verbose_name=_("First name")) 47 last_name = models.CharField(max_length=200, verbose_name=_("Last name")) 48 49 person_title = models.ForeignKey("PersonTitle", related_name="persons") 50 51 ROOT_REVERSE_ID = "persons" 52 TEMPLATE_DETAIL = "persons/cms/person_detail.html" 53 54 class Meta: 55 verbose_name = _("person") 56 57 def __str__(self): 58 """Human representation of a person""" 59 return "{model}: {title} ({full_name})".format( 60 model=self._meta.verbose_name.title(), 61 title=self.extended_object.get_title(), 62 full_name=self.get_full_name(), 63 ) 64 65 def save(self, *args, **kwargs): 66 """ 67 Enforce validation on each instance save 68 """ 69 self.full_clean() 70 super().save(*args, **kwargs) 71 72 def get_full_name(self): 73 """ 74 Return person's full name 75 """ 76 return "{person_title} {first_name} {last_name}".format( 77 person_title=self.person_title.title, 78 first_name=self.first_name, 79 last_name=self.last_name, 80 ) 81 82 83 class PersonPluginModel(CMSPlugin): 84 """ 85 Person plugin model handles the relation from PersonPlugin 86 to their Person instance 87 """ 88 89 page = models.ForeignKey(Page) 90 91 class Meta: 92 verbose_name = _("person plugin model") 93 94 def __str__(self): 95 """Human representation of a person plugin""" 96 return "{model:s}: {id:d}".format( 97 model=self._meta.verbose_name.title(), id=self.id 98 ) 99 [end of src/richie/apps/persons/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/richie/apps/persons/models.py b/src/richie/apps/persons/models.py --- a/src/richie/apps/persons/models.py +++ b/src/richie/apps/persons/models.py @@ -86,7 +86,7 @@ to their Person instance """ - page = models.ForeignKey(Page) + page = models.ForeignKey(Page, limit_choices_to={"person__isnull": False}) class Meta: verbose_name = _("person plugin model")
{"golden_diff": "diff --git a/src/richie/apps/persons/models.py b/src/richie/apps/persons/models.py\n--- a/src/richie/apps/persons/models.py\n+++ b/src/richie/apps/persons/models.py\n@@ -86,7 +86,7 @@\n to their Person instance\n \"\"\"\n \n- page = models.ForeignKey(Page)\n+ page = models.ForeignKey(Page, limit_choices_to={\"person__isnull\": False})\n \n class Meta:\n verbose_name = _(\"person plugin model\")\n", "issue": "Person plugin form list every pages, not only Person pages\n## Bug Report\r\n\r\n**Expected behavior/code**\r\nSelect box in PersonPlugin form should list only extended page with Person model.\r\n\r\n**Actual Behavior**\r\nCurrently the select box is listing every CMS pages.\r\n\r\n**Steps to Reproduce**\r\n1. Edit a page;\r\n2. Try to add a Person plugin into a placeholder which allow it;\r\n3. Click to open the select box from opened form for added Person plugin.\r\n\r\n**Environment**\r\n- Richie version: 0.1.0 (from my own branch synchronized from master 200c8a3) \r\n- Platform: Ubuntu 18.04 LTS\r\n\r\n**Possible Solution**\r\nAdding a filter inside plugin form machinery to retain only the extend page with Person.\r\n\n", "before_files": [{"content": "\"\"\"\nDeclare and configure the model for the person application\n\"\"\"\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.api import Page\nfrom cms.extensions import PageExtension\nfrom cms.models.pluginmodel import CMSPlugin\nfrom parler.models import TranslatableModel, TranslatedFields\n\n\nclass PersonTitle(TranslatableModel):\n \"\"\"\n PersonTitle define i18ned list of people titles and there abbreviations\n Instances of this models should only be created by CMS administrators\n \"\"\"\n\n translations = TranslatedFields(\n title=models.CharField(_(\"Title\"), max_length=200),\n abbreviation=models.CharField(_(\"Title abbreviation\"), max_length=10),\n )\n\n class Meta:\n verbose_name = _(\"person title\")\n\n def __str__(self):\n \"\"\"Human representation of a person title\"\"\"\n return \"{model}: {title} ({abbreviation})\".format(\n model=self._meta.verbose_name.title(),\n title=self.title,\n abbreviation=self.abbreviation,\n )\n\n\nclass Person(PageExtension):\n \"\"\"\n The person page extension represents and records people information.\n It could be a course or news article author.\n\n This model should be used to record structured data about the person whereas the\n associated page object is where we record the less structured information to display on the\n page to present the person.\n \"\"\"\n\n first_name = models.CharField(max_length=200, verbose_name=_(\"First name\"))\n last_name = models.CharField(max_length=200, verbose_name=_(\"Last name\"))\n\n person_title = models.ForeignKey(\"PersonTitle\", related_name=\"persons\")\n\n ROOT_REVERSE_ID = \"persons\"\n TEMPLATE_DETAIL = \"persons/cms/person_detail.html\"\n\n class Meta:\n verbose_name = _(\"person\")\n\n def __str__(self):\n \"\"\"Human representation of a person\"\"\"\n return \"{model}: {title} ({full_name})\".format(\n model=self._meta.verbose_name.title(),\n title=self.extended_object.get_title(),\n full_name=self.get_full_name(),\n )\n\n def save(self, *args, **kwargs):\n \"\"\"\n Enforce validation on each instance save\n \"\"\"\n self.full_clean()\n super().save(*args, **kwargs)\n\n def get_full_name(self):\n \"\"\"\n Return person's full name\n \"\"\"\n return \"{person_title} {first_name} {last_name}\".format(\n person_title=self.person_title.title,\n first_name=self.first_name,\n 
last_name=self.last_name,\n )\n\n\nclass PersonPluginModel(CMSPlugin):\n \"\"\"\n Person plugin model handles the relation from PersonPlugin\n to their Person instance\n \"\"\"\n\n page = models.ForeignKey(Page)\n\n class Meta:\n verbose_name = _(\"person plugin model\")\n\n def __str__(self):\n \"\"\"Human representation of a person plugin\"\"\"\n return \"{model:s}: {id:d}\".format(\n model=self._meta.verbose_name.title(), id=self.id\n )\n", "path": "src/richie/apps/persons/models.py"}]}
1,519
111
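Editor's note on the richie entry above: the one-line fix works because the django-cms page extension hangs off `Page` as a reverse relation (named `person` here), so `limit_choices_to={"person__isnull": False}` trims the ModelForm/admin select box to pages that actually carry a Person extension. A schematic fragment of the same idea — it assumes a configured Django project with django-cms installed, so treat it as illustrative rather than directly runnable; the class name is made up and `on_delete` is added only because modern Django requires it:

```python
from cms.models import Page
from django.db import models


class ExamplePagePicker(models.Model):
    # limit_choices_to filters the queryset behind the form widget,
    # so only pages with a related Person extension are offered.
    page = models.ForeignKey(
        Page,
        on_delete=models.CASCADE,
        limit_choices_to={"person__isnull": False},
    )
```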
gh_patches_debug_9198
rasdani/github-patches
git_diff
sotetsuk__pgx-792
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Docs] Add PyPI description Currently, it's empty. </issue> <code> [start of setup.py] 1 from setuptools import find_packages, setup 2 3 setup( 4 name="pgx", 5 version="0.4.0", 6 long_description_content_type="text/markdown", 7 description="", 8 url="", 9 author="Sotetsu KOYAMADA", 10 author_email="[email protected]", 11 keywords="", 12 packages=find_packages(), 13 package_data={"": ["LICENSE", "*.svg"]}, 14 include_package_data=True, 15 install_requires=[ 16 "jax>=0.3.25", # JAX version on Colab (TPU) 17 "chex>=0.1.6", 18 "svgwrite", 19 "msgpack", 20 "typing_extensions" 21 ], 22 classifiers=[ 23 "Programming Language :: Python :: 3.8", 24 "Programming Language :: Python :: 3.9", 25 "Programming Language :: Python :: 3.10", 26 ], 27 ) 28 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,10 +1,14 @@ from setuptools import find_packages, setup +from pathlib import Path + +long_description = (Path(__file__).parent / "README.md").read_text() setup( name="pgx", version="0.4.0", - long_description_content_type="text/markdown", - description="", + description="GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)", + long_description=long_description, + long_description_content_type='text/markdown', url="", author="Sotetsu KOYAMADA", author_email="[email protected]",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,10 +1,14 @@\n from setuptools import find_packages, setup\n+from pathlib import Path\n+\n+long_description = (Path(__file__).parent / \"README.md\").read_text()\n \n setup(\n name=\"pgx\",\n version=\"0.4.0\",\n- long_description_content_type=\"text/markdown\",\n- description=\"\",\n+ description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n url=\"\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n", "issue": "[Docs] Add PyPI description\nCurrently, it's empty.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nsetup(\n name=\"pgx\",\n version=\"0.4.0\",\n long_description_content_type=\"text/markdown\",\n description=\"\",\n url=\"\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n keywords=\"\",\n packages=find_packages(),\n package_data={\"\": [\"LICENSE\", \"*.svg\"]},\n include_package_data=True,\n install_requires=[\n \"jax>=0.3.25\", # JAX version on Colab (TPU)\n \"chex>=0.1.6\",\n \"svgwrite\",\n \"msgpack\",\n \"typing_extensions\"\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}]}
787
172
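Editor's note on the pgx entry above: the accepted diff reads README.md into `long_description` at build time, which is the standard way to populate the PyPI project page. One detail worth flagging when reusing the pattern is that `Path.read_text()` without an argument falls back to the platform locale encoding, so an explicit `encoding="utf-8"` is safer for non-ASCII READMEs. A hedged variant (the package name below is a placeholder):

```python
from pathlib import Path

from setuptools import find_packages, setup

long_description = (Path(__file__).parent / "README.md").read_text(encoding="utf-8")

setup(
    name="example-package",                      # placeholder, not the real project name
    version="0.0.1",
    description="One-line summary shown in search results",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
)
```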
gh_patches_debug_27075
rasdani/github-patches
git_diff
chainer__chainer-478
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: Reshape() with -1 fails When calling the chainer function Reshape() with a -1 value (e.g. Reshape((-1, 10, 10))) I get an error: ``` InvalidType: Expect: prod(in_types[0].shape) == prod((-1, 10, 10)) Actual: 2800 != -100 ``` The cupy function reshape does support -1 arguments, so the error seems to be just the check in chainer's Reshape. The problematic check is: _type_check_prod(in_types[0].shape) == _type_check_prod(self.shape) If a -1 argument is present, it should instead check if the size of in_types[0] is divisible by the product of the axis-lengths that are not -1. I would send a PR but I'm not exactly clear on the correct usage of type_check.expect, and had some issues with it in the past. Should be simple enough to fix though. </issue> <code> [start of chainer/functions/array/reshape.py] 1 import numpy 2 3 from chainer import function 4 from chainer.utils import type_check 5 6 7 _type_check_prod = type_check.Variable(numpy.prod, 'prod') 8 9 10 class Reshape(function.Function): 11 12 """Reshapes an input array without copy.""" 13 14 def __init__(self, shape): 15 self.shape = shape 16 17 def check_type_forward(self, in_types): 18 type_check.expect( 19 in_types.size() == 1, 20 _type_check_prod(in_types[0].shape) == 21 _type_check_prod(self.shape) 22 ) 23 24 def forward(self, x): 25 return x[0].reshape(self.shape), 26 27 def backward(self, x, gy): 28 return gy[0].reshape(x[0].shape), 29 30 31 def reshape(x, shape): 32 """Reshapes an input variable without copy. 33 34 Args: 35 x (~chainer.Variable): Input variable. 36 shape (tuple of ints): Target shape. 37 38 Returns: 39 ~chainer.Variable: Variable that holds a reshaped version of the input 40 variable. 41 42 """ 43 return Reshape(shape)(x) 44 [end of chainer/functions/array/reshape.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/array/reshape.py b/chainer/functions/array/reshape.py --- a/chainer/functions/array/reshape.py +++ b/chainer/functions/array/reshape.py @@ -1,10 +1,12 @@ -import numpy - from chainer import function from chainer.utils import type_check -_type_check_prod = type_check.Variable(numpy.prod, 'prod') +def _count_unknown_dims(shape): + cnt = 0 + for dim in shape: + cnt += dim < 0 + return cnt class Reshape(function.Function): @@ -12,15 +14,32 @@ """Reshapes an input array without copy.""" def __init__(self, shape): + cnt = _count_unknown_dims(shape) + assert cnt == 0 or cnt == 1 + self.shape = shape def check_type_forward(self, in_types): type_check.expect( in_types.size() == 1, - _type_check_prod(in_types[0].shape) == - _type_check_prod(self.shape) ) + x_type, = in_types + + cnt = _count_unknown_dims(self.shape) + if cnt == 0: + type_check.expect( + type_check.prod(x_type.shape) == type_check.prod(self.shape)) + else: + known_size = 1 + for s in self.shape: + if s > 0: + known_size *= s + size_var = type_check.Variable(known_size, + 'known_size(=%d)' % known_size) + type_check.expect( + type_check.prod(x_type.shape) % size_var == 0) + def forward(self, x): return x[0].reshape(self.shape),
{"golden_diff": "diff --git a/chainer/functions/array/reshape.py b/chainer/functions/array/reshape.py\n--- a/chainer/functions/array/reshape.py\n+++ b/chainer/functions/array/reshape.py\n@@ -1,10 +1,12 @@\n-import numpy\n-\n from chainer import function\n from chainer.utils import type_check\n \n \n-_type_check_prod = type_check.Variable(numpy.prod, 'prod')\n+def _count_unknown_dims(shape):\n+ cnt = 0\n+ for dim in shape:\n+ cnt += dim < 0\n+ return cnt\n \n \n class Reshape(function.Function):\n@@ -12,15 +14,32 @@\n \"\"\"Reshapes an input array without copy.\"\"\"\n \n def __init__(self, shape):\n+ cnt = _count_unknown_dims(shape)\n+ assert cnt == 0 or cnt == 1\n+\n self.shape = shape\n \n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n- _type_check_prod(in_types[0].shape) ==\n- _type_check_prod(self.shape)\n )\n \n+ x_type, = in_types\n+\n+ cnt = _count_unknown_dims(self.shape)\n+ if cnt == 0:\n+ type_check.expect(\n+ type_check.prod(x_type.shape) == type_check.prod(self.shape))\n+ else:\n+ known_size = 1\n+ for s in self.shape:\n+ if s > 0:\n+ known_size *= s\n+ size_var = type_check.Variable(known_size,\n+ 'known_size(=%d)' % known_size)\n+ type_check.expect(\n+ type_check.prod(x_type.shape) % size_var == 0)\n+\n def forward(self, x):\n return x[0].reshape(self.shape),\n", "issue": "Bug: Reshape() with -1 fails\nWhen calling the chainer function Reshape() with a -1 value (e.g. Reshape((-1, 10, 10))) I get an error: \n\n```\nInvalidType: Expect: prod(in_types[0].shape) == prod((-1, 10, 10))\nActual: 2800 != -100\n```\n\nThe cupy function reshape does support -1 arguments, so the error seems to be just the check in chainer's Reshape. The problematic check is:\n _type_check_prod(in_types[0].shape) == _type_check_prod(self.shape)\n\nIf a -1 argument is present, it should instead check if the size of in_types[0] is divisible by the product of the axis-lengths that are not -1. \n\nI would send a PR but I'm not exactly clear on the correct usage of type_check.expect, and had some issues with it in the past. Should be simple enough to fix though.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\n_type_check_prod = type_check.Variable(numpy.prod, 'prod')\n\n\nclass Reshape(function.Function):\n\n \"\"\"Reshapes an input array without copy.\"\"\"\n\n def __init__(self, shape):\n self.shape = shape\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n _type_check_prod(in_types[0].shape) ==\n _type_check_prod(self.shape)\n )\n\n def forward(self, x):\n return x[0].reshape(self.shape),\n\n def backward(self, x, gy):\n return gy[0].reshape(x[0].shape),\n\n\ndef reshape(x, shape):\n \"\"\"Reshapes an input variable without copy.\n\n Args:\n x (~chainer.Variable): Input variable.\n shape (tuple of ints): Target shape.\n\n Returns:\n ~chainer.Variable: Variable that holds a reshaped version of the input\n variable.\n\n \"\"\"\n return Reshape(shape)(x)\n", "path": "chainer/functions/array/reshape.py"}]}
1,074
395
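Editor's note on the chainer Reshape entry above: the patch stops comparing raw products (which turns `(-1, 10, 10)` into `-100`) and instead checks that the input size is divisible by the product of the known dimensions when exactly one `-1` is present. A standalone paraphrase of that check, outside chainer's type_check machinery:

```python
import numpy as np


def validate_reshape(in_shape, out_shape):
    """Raise if `in_shape` cannot be reshaped to `out_shape` (at most one -1 allowed)."""
    unknown = sum(1 for d in out_shape if d < 0)
    if unknown > 1:
        raise ValueError("at most one dimension may be -1: %r" % (out_shape,))
    in_size = int(np.prod(in_shape))
    known_size = 1
    for d in out_shape:
        if d > 0:
            known_size *= d
    if unknown == 0 and in_size != known_size:
        raise ValueError("total size mismatch: %d != %d" % (in_size, known_size))
    if unknown == 1 and in_size % known_size != 0:
        raise ValueError("%d elements do not fit %r" % (in_size, out_shape))


validate_reshape((28, 10, 10), (-1, 10, 10))   # passes: 2800 is divisible by 100
```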
gh_patches_debug_13895
rasdani/github-patches
git_diff
ethereum__web3.py-2587
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Web3 instantiation from subclass throws error * Version: 5.30.0 * Python: 3.8 * OS: osx * pip freeze output is intentionally omitted. ### What was wrong? We use a sub-class of Web3, for the purpose of this example, call it SubclassWeb3, as follows: ``` class SubclassWeb3(Web3): def __init__(self, ....): ... do bunch of stuff... super().__init__(*args, **kwargs) ... do bunch more stuff... ``` This setup worked fine with web3 version == 5.23.1. However, switching to web3==5.30.0 breaks this, with error ``` [rest of callstack omitted] File “[redacted]/web3/main.py”, line 251, in __init__ self.attach_modules(modules) File “[redacted]/web3/main.py”, line 340, in attach_modules _attach_modules(self, modules) File “[redacted]/web3/_utils/module.py”, line 68, in attach_modules setattr(parent_module, module_name, module_class(w3)) File “[redacted]/web3/module.py”, line 93, in __init__ self.codec: ABICodec = web3.codec AttributeError: ‘NoneType’ object has no attribute ‘codec’ ``` This used to work in older versions because the parent_module is assumed to be a Web3 object and stored accordingly - [see logic](https://github.com/ethereum/web3.py/blob/2b229d2e16792e52bb47108e665c6a8937c188a2/web3/_utils/module.py#L33-L35). Compare this to the somewhat brittle name checking that happens in 5.30.0 - [see logic](https://github.com/ethereum/web3.py/blob/ee5f0543ad91e4ba991997e35f50719ceb58135d/web3/_utils/module.py#L58-L60). This doesn't quite work because if this logic is being executed from a subclass of Web3, the name of the parent module appears as SubclassWeb3 (and not Web3). ### How can it be fixed? Fill this section in if you know how this could or should be fixed. </issue> <code> [start of web3/_utils/module.py] 1 import inspect 2 from io import ( 3 UnsupportedOperation, 4 ) 5 from typing import ( 6 TYPE_CHECKING, 7 Any, 8 Dict, 9 List, 10 Optional, 11 Sequence, 12 Union, 13 ) 14 15 from web3.exceptions import ( 16 ValidationError, 17 ) 18 from web3.module import ( 19 Module, 20 ) 21 22 if TYPE_CHECKING: 23 from web3 import Web3 # noqa: F401 24 25 26 def _validate_init_params_and_return_if_found(module_class: Any) -> List[str]: 27 init_params_raw = list(inspect.signature(module_class.__init__).parameters) 28 module_init_params = [ 29 param for param in init_params_raw if param not in ['self', 'args', 'kwargs'] 30 ] 31 32 if len(module_init_params) > 1: 33 raise UnsupportedOperation( 34 "A module class may accept a single `Web3` instance as the first argument of its " 35 f"__init__() method. More than one argument found for {module_class.__name__}: " 36 f"{module_init_params}" 37 ) 38 39 return module_init_params 40 41 42 def attach_modules( 43 parent_module: Union["Web3", "Module"], 44 module_definitions: Dict[str, Any], 45 w3: Optional[Union["Web3", "Module"]] = None 46 ) -> None: 47 for module_name, module_info in module_definitions.items(): 48 module_info_is_list_like = isinstance(module_info, Sequence) 49 50 module_class = module_info[0] if module_info_is_list_like else module_info 51 52 if hasattr(parent_module, module_name): 53 raise AttributeError( 54 f"Cannot set {parent_module} module named '{module_name}'. 
The web3 object " 55 "already has an attribute with that name" 56 ) 57 58 # The parent module is the ``Web3`` instance on first run of the loop 59 if type(parent_module).__name__ == 'Web3': 60 w3 = parent_module 61 62 module_init_params = _validate_init_params_and_return_if_found(module_class) 63 if len(module_init_params) == 1: 64 # Modules that need access to the ``Web3`` instance may accept the instance as the first 65 # arg in their ``__init__()`` method. This is the case for any module that inherits from 66 # ``web3.module.Module``. 67 # e.g. def __init__(self, w3): 68 setattr(parent_module, module_name, module_class(w3)) 69 else: 70 # Modules need not take in a ``Web3`` instance in their ``__init__()`` if not needed 71 setattr(parent_module, module_name, module_class()) 72 73 if module_info_is_list_like: 74 if len(module_info) == 2: 75 submodule_definitions = module_info[1] 76 module = getattr(parent_module, module_name) 77 attach_modules(module, submodule_definitions, w3) 78 elif len(module_info) != 1: 79 raise ValidationError("Module definitions can only have 1 or 2 elements.") 80 [end of web3/_utils/module.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/web3/_utils/module.py b/web3/_utils/module.py --- a/web3/_utils/module.py +++ b/web3/_utils/module.py @@ -55,9 +55,13 @@ "already has an attribute with that name" ) - # The parent module is the ``Web3`` instance on first run of the loop - if type(parent_module).__name__ == 'Web3': - w3 = parent_module + # The parent module is the ``Web3`` instance on first run of the loop and w3 is + # None. Thus, set w3 to the parent_module. The import needs to happen locally + # due to circular import issues. + if w3 is None: + from web3 import Web3 + if isinstance(parent_module, Web3): + w3 = parent_module module_init_params = _validate_init_params_and_return_if_found(module_class) if len(module_init_params) == 1:
{"golden_diff": "diff --git a/web3/_utils/module.py b/web3/_utils/module.py\n--- a/web3/_utils/module.py\n+++ b/web3/_utils/module.py\n@@ -55,9 +55,13 @@\n \"already has an attribute with that name\"\n )\n \n- # The parent module is the ``Web3`` instance on first run of the loop\n- if type(parent_module).__name__ == 'Web3':\n- w3 = parent_module\n+ # The parent module is the ``Web3`` instance on first run of the loop and w3 is\n+ # None. Thus, set w3 to the parent_module. The import needs to happen locally\n+ # due to circular import issues.\n+ if w3 is None:\n+ from web3 import Web3\n+ if isinstance(parent_module, Web3):\n+ w3 = parent_module\n \n module_init_params = _validate_init_params_and_return_if_found(module_class)\n if len(module_init_params) == 1:\n", "issue": "Web3 instantiation from subclass throws error\n* Version: 5.30.0\r\n* Python: 3.8\r\n* OS: osx\r\n* pip freeze output is intentionally omitted.\r\n\r\n### What was wrong?\r\nWe use a sub-class of Web3, for the purpose of this example, call it SubclassWeb3, as follows:\r\n\r\n```\r\nclass SubclassWeb3(Web3):\r\ndef __init__(self, ....):\r\n ... do bunch of stuff...\r\n super().__init__(*args, **kwargs)\r\n ... do bunch more stuff...\r\n```\r\n\r\nThis setup worked fine with web3 version == 5.23.1. However, switching to web3==5.30.0 breaks this, with error\r\n\r\n```\r\n[rest of callstack omitted]\r\n File \u201c[redacted]/web3/main.py\u201d, line 251, in __init__\r\n self.attach_modules(modules)\r\n File \u201c[redacted]/web3/main.py\u201d, line 340, in attach_modules\r\n _attach_modules(self, modules)\r\n File \u201c[redacted]/web3/_utils/module.py\u201d, line 68, in attach_modules\r\n setattr(parent_module, module_name, module_class(w3))\r\n File \u201c[redacted]/web3/module.py\u201d, line 93, in __init__\r\n self.codec: ABICodec = web3.codec\r\nAttributeError: \u2018NoneType\u2019 object has no attribute \u2018codec\u2019\r\n```\r\n\r\nThis used to work in older versions because the parent_module is assumed to be a Web3 object and stored accordingly - [see logic](https://github.com/ethereum/web3.py/blob/2b229d2e16792e52bb47108e665c6a8937c188a2/web3/_utils/module.py#L33-L35).\r\n\r\nCompare this to the somewhat brittle name checking that happens in 5.30.0 - [see logic](https://github.com/ethereum/web3.py/blob/ee5f0543ad91e4ba991997e35f50719ceb58135d/web3/_utils/module.py#L58-L60). This doesn't quite work because if this logic is being executed from a subclass of Web3, the name of the parent module appears as SubclassWeb3 (and not Web3).\r\n\r\n### How can it be fixed?\r\n\r\nFill this section in if you know how this could or should be fixed.\n", "before_files": [{"content": "import inspect\nfrom io import (\n UnsupportedOperation,\n)\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Optional,\n Sequence,\n Union,\n)\n\nfrom web3.exceptions import (\n ValidationError,\n)\nfrom web3.module import (\n Module,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\n\ndef _validate_init_params_and_return_if_found(module_class: Any) -> List[str]:\n init_params_raw = list(inspect.signature(module_class.__init__).parameters)\n module_init_params = [\n param for param in init_params_raw if param not in ['self', 'args', 'kwargs']\n ]\n\n if len(module_init_params) > 1:\n raise UnsupportedOperation(\n \"A module class may accept a single `Web3` instance as the first argument of its \"\n f\"__init__() method. 
More than one argument found for {module_class.__name__}: \"\n f\"{module_init_params}\"\n )\n\n return module_init_params\n\n\ndef attach_modules(\n parent_module: Union[\"Web3\", \"Module\"],\n module_definitions: Dict[str, Any],\n w3: Optional[Union[\"Web3\", \"Module\"]] = None\n) -> None:\n for module_name, module_info in module_definitions.items():\n module_info_is_list_like = isinstance(module_info, Sequence)\n\n module_class = module_info[0] if module_info_is_list_like else module_info\n\n if hasattr(parent_module, module_name):\n raise AttributeError(\n f\"Cannot set {parent_module} module named '{module_name}'. The web3 object \"\n \"already has an attribute with that name\"\n )\n\n # The parent module is the ``Web3`` instance on first run of the loop\n if type(parent_module).__name__ == 'Web3':\n w3 = parent_module\n\n module_init_params = _validate_init_params_and_return_if_found(module_class)\n if len(module_init_params) == 1:\n # Modules that need access to the ``Web3`` instance may accept the instance as the first\n # arg in their ``__init__()`` method. This is the case for any module that inherits from\n # ``web3.module.Module``.\n # e.g. def __init__(self, w3):\n setattr(parent_module, module_name, module_class(w3))\n else:\n # Modules need not take in a ``Web3`` instance in their ``__init__()`` if not needed\n setattr(parent_module, module_name, module_class())\n\n if module_info_is_list_like:\n if len(module_info) == 2:\n submodule_definitions = module_info[1]\n module = getattr(parent_module, module_name)\n attach_modules(module, submodule_definitions, w3)\n elif len(module_info) != 1:\n raise ValidationError(\"Module definitions can only have 1 or 2 elements.\")\n", "path": "web3/_utils/module.py"}]}
1,852
222
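Editor's note on the web3.py entry above: the regression came from comparing `type(parent_module).__name__` against the literal string `'Web3'`, which a subclass never satisfies, whereas the fix's `isinstance` check covers subclasses too. The difference in two lines, using stand-in classes rather than the real web3 objects:

```python
class Web3:                      # stand-in for web3.Web3
    pass


class SubclassWeb3(Web3):        # stand-in for a project-specific subclass
    pass


w3 = SubclassWeb3()

print(type(w3).__name__ == "Web3")   # False -- the name check misses subclasses
print(isinstance(w3, Web3))          # True  -- the fixed check accepts them
```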
gh_patches_debug_13579
rasdani/github-patches
git_diff
chainer__chainer-2234
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Wrong behavior of SerialIterator if dataset is numpy array. In `serial_iterator.py` lines [66](https://github.com/pfnet/chainer/blob/master/chainer/iterators/serial_iterator.py#L66) and [68](https://github.com/pfnet/chainer/blob/master/chainer/iterators/serial_iterator.py#L68) contain `+=` to extend lists. Unfortunately, this does not crash for `numpy` arrays but produces undesired behavior. The dataset itself gets modified (see for `6` in the second example): ``` import numpy as np from chainer.iterators.serial_iterator import SerialIterator dataset = [1, 2, 3, 4, 5] it = SerialIterator(dataset, batch_size=2, shuffle=False) for _ in range(6): example = it.next() print(example) ``` ``` [1, 2] [3, 4] [5, 1] [2, 3] [4, 5] [1, 2] ``` ``` dataset = np.asarray([1, 2, 3, 4, 5]) it = SerialIterator(dataset, batch_size=2, shuffle=False) for _ in range(6): example = it.next() print(example) ``` ``` [1 2] [3 4] [6] [2 3] [4 6] [1 2] ``` When the two lines get changed to ``` batch.extend(list(self.dataset[:rest])) ``` and ``` batch.extend([self.dataset[index] for index in self._order[:rest]]) ``` a useful error is raised. </issue> <code> [start of chainer/iterators/serial_iterator.py] 1 from __future__ import division 2 3 import numpy 4 5 from chainer.dataset import iterator 6 7 8 class SerialIterator(iterator.Iterator): 9 10 """Dataset iterator that serially reads the examples. 11 12 This is a simple implementation of :class:`~chainer.dataset.Iterator` 13 that just visits each example in either the order of indexes or a shuffled 14 order. 15 16 To avoid unintentional performance degradation, the ``shuffle`` option is 17 set to ``True`` by default. For validation, it is better to set it to 18 ``False`` when the underlying dataset supports fast slicing. If the 19 order of examples has an important meaning and the updater depends on the 20 original order, this option should be set to ``False``. 21 22 Args: 23 dataset: Dataset to iterate. 24 batch_size (int): Number of examples within each batch. 25 repeat (bool): If ``True``, it infinitely loops over the dataset. 26 Otherwise, it stops iteration at the end of the first epoch. 27 shuffle (bool): If ``True``, the order of examples is shuffled at the 28 beginning of each epoch. Otherwise, examples are extracted in the 29 order of indexes. 
30 31 """ 32 33 def __init__(self, dataset, batch_size, repeat=True, shuffle=True): 34 self.dataset = dataset 35 self.batch_size = batch_size 36 self._repeat = repeat 37 if shuffle: 38 self._order = numpy.random.permutation(len(dataset)) 39 else: 40 self._order = None 41 42 self.current_position = 0 43 self.epoch = 0 44 self.is_new_epoch = False 45 46 def __next__(self): 47 if not self._repeat and self.epoch > 0: 48 raise StopIteration 49 50 i = self.current_position 51 i_end = i + self.batch_size 52 N = len(self.dataset) 53 54 if self._order is None: 55 batch = self.dataset[i:i_end] 56 else: 57 batch = [self.dataset[index] for index in self._order[i:i_end]] 58 59 if i_end >= N: 60 if self._repeat: 61 rest = i_end - N 62 if self._order is not None: 63 numpy.random.shuffle(self._order) 64 if rest > 0: 65 if self._order is None: 66 batch += list(self.dataset[:rest]) 67 else: 68 batch += [self.dataset[index] 69 for index in self._order[:rest]] 70 self.current_position = rest 71 else: 72 self.current_position = N 73 74 self.epoch += 1 75 self.is_new_epoch = True 76 else: 77 self.is_new_epoch = False 78 self.current_position = i_end 79 80 return batch 81 82 next = __next__ 83 84 @property 85 def epoch_detail(self): 86 return self.epoch + self.current_position / len(self.dataset) 87 88 def serialize(self, serializer): 89 self.current_position = serializer('current_position', 90 self.current_position) 91 self.epoch = serializer('epoch', self.epoch) 92 self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch) 93 if self._order is not None: 94 serializer('_order', self._order) 95 [end of chainer/iterators/serial_iterator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/iterators/serial_iterator.py b/chainer/iterators/serial_iterator.py --- a/chainer/iterators/serial_iterator.py +++ b/chainer/iterators/serial_iterator.py @@ -63,10 +63,10 @@ numpy.random.shuffle(self._order) if rest > 0: if self._order is None: - batch += list(self.dataset[:rest]) + batch.extend(self.dataset[:rest]) else: - batch += [self.dataset[index] - for index in self._order[:rest]] + batch.extend([self.dataset[index] + for index in self._order[:rest]]) self.current_position = rest else: self.current_position = N
{"golden_diff": "diff --git a/chainer/iterators/serial_iterator.py b/chainer/iterators/serial_iterator.py\n--- a/chainer/iterators/serial_iterator.py\n+++ b/chainer/iterators/serial_iterator.py\n@@ -63,10 +63,10 @@\n numpy.random.shuffle(self._order)\n if rest > 0:\n if self._order is None:\n- batch += list(self.dataset[:rest])\n+ batch.extend(self.dataset[:rest])\n else:\n- batch += [self.dataset[index]\n- for index in self._order[:rest]]\n+ batch.extend([self.dataset[index]\n+ for index in self._order[:rest]])\n self.current_position = rest\n else:\n self.current_position = N\n", "issue": "Wrong behavior of SerialIterator if dataset is numpy array.\nIn `serial_iterator.py` lines [66](https://github.com/pfnet/chainer/blob/master/chainer/iterators/serial_iterator.py#L66) and [68](https://github.com/pfnet/chainer/blob/master/chainer/iterators/serial_iterator.py#L68) contain `+=` to extend lists.\r\n\r\nUnfortunately, this does not crash for `numpy` arrays but produces undesired behavior. The dataset itself gets modified (see for `6` in the second example):\r\n\r\n```\r\nimport numpy as np\r\nfrom chainer.iterators.serial_iterator import SerialIterator\r\n\r\ndataset = [1, 2, 3, 4, 5]\r\nit = SerialIterator(dataset, batch_size=2, shuffle=False)\r\n\r\nfor _ in range(6):\r\n example = it.next()\r\n print(example)\r\n```\r\n\r\n```\r\n[1, 2]\r\n[3, 4]\r\n[5, 1]\r\n[2, 3]\r\n[4, 5]\r\n[1, 2]\r\n```\r\n\r\n```\r\ndataset = np.asarray([1, 2, 3, 4, 5])\r\nit = SerialIterator(dataset, batch_size=2, shuffle=False)\r\n\r\nfor _ in range(6):\r\n example = it.next()\r\n print(example)\r\n```\r\n\r\n```\r\n[1 2]\r\n[3 4]\r\n[6]\r\n[2 3]\r\n[4 6]\r\n[1 2]\r\n```\r\n\r\nWhen the two lines get changed to\r\n```\r\nbatch.extend(list(self.dataset[:rest]))\r\n```\r\n\r\nand\r\n```\r\nbatch.extend([self.dataset[index] for index in self._order[:rest]])\r\n```\r\n\r\na useful error is raised.\n", "before_files": [{"content": "from __future__ import division\n\nimport numpy\n\nfrom chainer.dataset import iterator\n\n\nclass SerialIterator(iterator.Iterator):\n\n \"\"\"Dataset iterator that serially reads the examples.\n\n This is a simple implementation of :class:`~chainer.dataset.Iterator`\n that just visits each example in either the order of indexes or a shuffled\n order.\n\n To avoid unintentional performance degradation, the ``shuffle`` option is\n set to ``True`` by default. For validation, it is better to set it to\n ``False`` when the underlying dataset supports fast slicing. If the\n order of examples has an important meaning and the updater depends on the\n original order, this option should be set to ``False``.\n\n Args:\n dataset: Dataset to iterate.\n batch_size (int): Number of examples within each batch.\n repeat (bool): If ``True``, it infinitely loops over the dataset.\n Otherwise, it stops iteration at the end of the first epoch.\n shuffle (bool): If ``True``, the order of examples is shuffled at the\n beginning of each epoch. 
Otherwise, examples are extracted in the\n order of indexes.\n\n \"\"\"\n\n def __init__(self, dataset, batch_size, repeat=True, shuffle=True):\n self.dataset = dataset\n self.batch_size = batch_size\n self._repeat = repeat\n if shuffle:\n self._order = numpy.random.permutation(len(dataset))\n else:\n self._order = None\n\n self.current_position = 0\n self.epoch = 0\n self.is_new_epoch = False\n\n def __next__(self):\n if not self._repeat and self.epoch > 0:\n raise StopIteration\n\n i = self.current_position\n i_end = i + self.batch_size\n N = len(self.dataset)\n\n if self._order is None:\n batch = self.dataset[i:i_end]\n else:\n batch = [self.dataset[index] for index in self._order[i:i_end]]\n\n if i_end >= N:\n if self._repeat:\n rest = i_end - N\n if self._order is not None:\n numpy.random.shuffle(self._order)\n if rest > 0:\n if self._order is None:\n batch += list(self.dataset[:rest])\n else:\n batch += [self.dataset[index]\n for index in self._order[:rest]]\n self.current_position = rest\n else:\n self.current_position = N\n\n self.epoch += 1\n self.is_new_epoch = True\n else:\n self.is_new_epoch = False\n self.current_position = i_end\n\n return batch\n\n next = __next__\n\n @property\n def epoch_detail(self):\n return self.epoch + self.current_position / len(self.dataset)\n\n def serialize(self, serializer):\n self.current_position = serializer('current_position',\n self.current_position)\n self.epoch = serializer('epoch', self.epoch)\n self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)\n if self._order is not None:\n serializer('_order', self._order)\n", "path": "chainer/iterators/serial_iterator.py"}]}
1,755
165
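Editor's note on the SerialIterator entry above: when `shuffle=False` the batch is a slice of the dataset, and for a NumPy array that slice is a view, so `batch += [...]` does a broadcast in-place addition straight into the caller's data (which is where the stray `6` in the report comes from). The patch switches to `list.extend`, which fails loudly for an ndarray instead of corrupting it; the aliasing itself can be reproduced in a few lines:

```python
import numpy as np

dataset = np.asarray([1, 2, 3, 4, 5])
batch = dataset[4:]        # a view onto the last element, not a copy
batch += [1]               # in-place broadcast add writes through the view
print(dataset)             # [1 2 3 4 6] -- the dataset itself was modified

dataset = np.asarray([1, 2, 3, 4, 5])
batch = list(dataset[4:])  # materialise a real Python list first
batch.extend(dataset[:1])  # extending the list leaves the array untouched
print(dataset)             # [1 2 3 4 5]
```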
gh_patches_debug_39730
rasdani/github-patches
git_diff
carpentries__amy-351
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deal with breadcrumbs As Greg mentioned in #296, he hadn't liked the way breadcrumbs repeat current page title (or header) in their last element, for example: page "Event 2015-05-25-something" will have breadcrumbs "Amy / All events / Event 2015-05-25-something". I took a look at other big websites and how they do breadcrumbs and @gvwilson was right. They don't repeat current site at the end of breadcrumbs. This means we'd only have breadcrumbs at most 3 links long: Amy / All \* / \* [ / action ], for example: Was: - Amy / All events / Event 2015-05-25-something / Edit Will be: - Amy / All events / Event 2015-05-25-something But this does not bug me. In case of `All *` pages, we can just as well drop breadcrumbs (because they'd look like "Amy / "). So I don't really know what to do: 1. Display breadcrumbs on the same pages as now, but hide the last item. 2. Display breadcrumbs on the pages that would have more than 1 breadcrumbs. 3. Drop breadcrumbs completely. </issue> <code> [start of workshops/templatetags/breadcrumbs.py] 1 import logging 2 3 from django import template 4 from django.core.urlresolvers import reverse 5 from django.utils.encoding import force_text 6 from django.utils.html import escape 7 8 register = template.Library() 9 _LOG = logging.getLogger(__name__) 10 11 12 @register.simple_tag 13 def breadcrumb(title, url): 14 ''' 15 Create a simple anchor with provided text and already-resolved URL. 16 Example usage: 17 {% breadcrumb "Title of breadcrumb" resolved_url %} 18 ''' 19 return create_crumb(title, url) 20 21 22 @register.simple_tag 23 def breadcrumb_url(title, url_name): 24 ''' 25 Add non-active breadcrumb with specified title. Second argument should be 26 a string name of URL that needs to be resolved. 27 Example usage: 28 {% breadcrumb_url "Title of breadcrumb" url_name %} 29 ''' 30 url = reverse(url_name) 31 return create_crumb(title, url) 32 33 34 @register.simple_tag 35 def breadcrumb_active(title): 36 ''' 37 Add active breadcrumb, but not in an anchor. 38 Example usage: 39 {% breadcrumb_active "Title of breadcrumb" %} 40 ''' 41 return create_crumb(str(title), url=None, active=True) 42 43 44 @register.simple_tag 45 def breadcrumb_index_all_objects(model): 46 ''' 47 Add breadcrumb linking to the listing of all objects of specific type. 48 This tag accepts both models or model instances as an argument. 49 Example usage: 50 {% breadcrumb_index_all_objects model %} 51 {% breadcrumb_index_all_objects person %} 52 ''' 53 plural = force_text(model._meta.verbose_name_plural) 54 title = 'All {}'.format(plural) 55 url_name = 'all_{}'.format(plural) 56 url = reverse(url_name) 57 return create_crumb(title, url) 58 59 60 @register.simple_tag 61 def breadcrumb_edit_object(obj): 62 ''' 63 Add an active breadcrumb with the title "Edit MODEL_NAME". 64 This tag accepts model instance as an argument. 65 Example usage: 66 {% breadcrumb_edit_object person %} 67 ''' 68 singular = force_text(obj._meta.verbose_name) 69 title = 'Edit {}'.format(singular) 70 return create_crumb(title, url=None, active=True) 71 72 73 @register.simple_tag 74 def breadcrumb_new_object(model): 75 ''' 76 Add an active breadcrumb with the title "Add new MODEL_NAME". 77 This tag accepts model class as an argument. 
78 Example usage: 79 {% breadcrumb_new_object person %} 80 ''' 81 singular = force_text(model._meta.verbose_name) 82 title = 'Add new {}'.format(singular) 83 return create_crumb(title, url=None, active=True) 84 85 86 @register.simple_tag 87 def breadcrumb_object(obj): 88 ''' 89 Add non-active breadcrumb with the title "Add new MODEL_NAME". 90 This tag accepts model instance as an argument. 91 Example usage: 92 {% breadcrumb_object person %} 93 ''' 94 title = str(obj) 95 url = obj.get_absolute_url() 96 return create_crumb(title, url, active=False) 97 98 99 @register.simple_tag 100 def breadcrumb_main_page(): 101 ''' 102 Special case of ``breadcrumb_url``. In all templates there's always a link 103 to the main page so I wanted to save everyone thinking & writing by 104 introducing this helper tag. 105 Example usage: 106 {% breadcrumb_main_page %} 107 ''' 108 title = 'Amy' 109 url = reverse('index') 110 return create_crumb(title, url) 111 112 113 def create_crumb(title, url=None, active=False): 114 ''' 115 Helper function that creates breadcrumb. 116 ''' 117 active_str = '' 118 if active: 119 active_str = ' class="active"' 120 121 title = escape(title) 122 inner_str = title 123 if url: 124 inner_str = '<a href="{0}">{1}</a>'.format(url, title) 125 126 crumb = '<li{0}>{1}</li>'.format(active_str, inner_str) 127 128 return crumb 129 [end of workshops/templatetags/breadcrumbs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/workshops/templatetags/breadcrumbs.py b/workshops/templatetags/breadcrumbs.py deleted file mode 100644 --- a/workshops/templatetags/breadcrumbs.py +++ /dev/null @@ -1,128 +0,0 @@ -import logging - -from django import template -from django.core.urlresolvers import reverse -from django.utils.encoding import force_text -from django.utils.html import escape - -register = template.Library() -_LOG = logging.getLogger(__name__) - - [email protected]_tag -def breadcrumb(title, url): - ''' - Create a simple anchor with provided text and already-resolved URL. - Example usage: - {% breadcrumb "Title of breadcrumb" resolved_url %} - ''' - return create_crumb(title, url) - - [email protected]_tag -def breadcrumb_url(title, url_name): - ''' - Add non-active breadcrumb with specified title. Second argument should be - a string name of URL that needs to be resolved. - Example usage: - {% breadcrumb_url "Title of breadcrumb" url_name %} - ''' - url = reverse(url_name) - return create_crumb(title, url) - - [email protected]_tag -def breadcrumb_active(title): - ''' - Add active breadcrumb, but not in an anchor. - Example usage: - {% breadcrumb_active "Title of breadcrumb" %} - ''' - return create_crumb(str(title), url=None, active=True) - - [email protected]_tag -def breadcrumb_index_all_objects(model): - ''' - Add breadcrumb linking to the listing of all objects of specific type. - This tag accepts both models or model instances as an argument. - Example usage: - {% breadcrumb_index_all_objects model %} - {% breadcrumb_index_all_objects person %} - ''' - plural = force_text(model._meta.verbose_name_plural) - title = 'All {}'.format(plural) - url_name = 'all_{}'.format(plural) - url = reverse(url_name) - return create_crumb(title, url) - - [email protected]_tag -def breadcrumb_edit_object(obj): - ''' - Add an active breadcrumb with the title "Edit MODEL_NAME". - This tag accepts model instance as an argument. - Example usage: - {% breadcrumb_edit_object person %} - ''' - singular = force_text(obj._meta.verbose_name) - title = 'Edit {}'.format(singular) - return create_crumb(title, url=None, active=True) - - [email protected]_tag -def breadcrumb_new_object(model): - ''' - Add an active breadcrumb with the title "Add new MODEL_NAME". - This tag accepts model class as an argument. - Example usage: - {% breadcrumb_new_object person %} - ''' - singular = force_text(model._meta.verbose_name) - title = 'Add new {}'.format(singular) - return create_crumb(title, url=None, active=True) - - [email protected]_tag -def breadcrumb_object(obj): - ''' - Add non-active breadcrumb with the title "Add new MODEL_NAME". - This tag accepts model instance as an argument. - Example usage: - {% breadcrumb_object person %} - ''' - title = str(obj) - url = obj.get_absolute_url() - return create_crumb(title, url, active=False) - - [email protected]_tag -def breadcrumb_main_page(): - ''' - Special case of ``breadcrumb_url``. In all templates there's always a link - to the main page so I wanted to save everyone thinking & writing by - introducing this helper tag. - Example usage: - {% breadcrumb_main_page %} - ''' - title = 'Amy' - url = reverse('index') - return create_crumb(title, url) - - -def create_crumb(title, url=None, active=False): - ''' - Helper function that creates breadcrumb. 
- ''' - active_str = '' - if active: - active_str = ' class="active"' - - title = escape(title) - inner_str = title - if url: - inner_str = '<a href="{0}">{1}</a>'.format(url, title) - - crumb = '<li{0}>{1}</li>'.format(active_str, inner_str) - - return crumb
{"golden_diff": "diff --git a/workshops/templatetags/breadcrumbs.py b/workshops/templatetags/breadcrumbs.py\ndeleted file mode 100644\n--- a/workshops/templatetags/breadcrumbs.py\n+++ /dev/null\n@@ -1,128 +0,0 @@\n-import logging\n-\n-from django import template\n-from django.core.urlresolvers import reverse\n-from django.utils.encoding import force_text\n-from django.utils.html import escape\n-\n-register = template.Library()\n-_LOG = logging.getLogger(__name__)\n-\n-\[email protected]_tag\n-def breadcrumb(title, url):\n- '''\n- Create a simple anchor with provided text and already-resolved URL.\n- Example usage:\n- {% breadcrumb \"Title of breadcrumb\" resolved_url %}\n- '''\n- return create_crumb(title, url)\n-\n-\[email protected]_tag\n-def breadcrumb_url(title, url_name):\n- '''\n- Add non-active breadcrumb with specified title. Second argument should be\n- a string name of URL that needs to be resolved.\n- Example usage:\n- {% breadcrumb_url \"Title of breadcrumb\" url_name %}\n- '''\n- url = reverse(url_name)\n- return create_crumb(title, url)\n-\n-\[email protected]_tag\n-def breadcrumb_active(title):\n- '''\n- Add active breadcrumb, but not in an anchor.\n- Example usage:\n- {% breadcrumb_active \"Title of breadcrumb\" %}\n- '''\n- return create_crumb(str(title), url=None, active=True)\n-\n-\[email protected]_tag\n-def breadcrumb_index_all_objects(model):\n- '''\n- Add breadcrumb linking to the listing of all objects of specific type.\n- This tag accepts both models or model instances as an argument.\n- Example usage:\n- {% breadcrumb_index_all_objects model %}\n- {% breadcrumb_index_all_objects person %}\n- '''\n- plural = force_text(model._meta.verbose_name_plural)\n- title = 'All {}'.format(plural)\n- url_name = 'all_{}'.format(plural)\n- url = reverse(url_name)\n- return create_crumb(title, url)\n-\n-\[email protected]_tag\n-def breadcrumb_edit_object(obj):\n- '''\n- Add an active breadcrumb with the title \"Edit MODEL_NAME\".\n- This tag accepts model instance as an argument.\n- Example usage:\n- {% breadcrumb_edit_object person %}\n- '''\n- singular = force_text(obj._meta.verbose_name)\n- title = 'Edit {}'.format(singular)\n- return create_crumb(title, url=None, active=True)\n-\n-\[email protected]_tag\n-def breadcrumb_new_object(model):\n- '''\n- Add an active breadcrumb with the title \"Add new MODEL_NAME\".\n- This tag accepts model class as an argument.\n- Example usage:\n- {% breadcrumb_new_object person %}\n- '''\n- singular = force_text(model._meta.verbose_name)\n- title = 'Add new {}'.format(singular)\n- return create_crumb(title, url=None, active=True)\n-\n-\[email protected]_tag\n-def breadcrumb_object(obj):\n- '''\n- Add non-active breadcrumb with the title \"Add new MODEL_NAME\".\n- This tag accepts model instance as an argument.\n- Example usage:\n- {% breadcrumb_object person %}\n- '''\n- title = str(obj)\n- url = obj.get_absolute_url()\n- return create_crumb(title, url, active=False)\n-\n-\[email protected]_tag\n-def breadcrumb_main_page():\n- '''\n- Special case of ``breadcrumb_url``. 
In all templates there's always a link\n- to the main page so I wanted to save everyone thinking & writing by\n- introducing this helper tag.\n- Example usage:\n- {% breadcrumb_main_page %}\n- '''\n- title = 'Amy'\n- url = reverse('index')\n- return create_crumb(title, url)\n-\n-\n-def create_crumb(title, url=None, active=False):\n- '''\n- Helper function that creates breadcrumb.\n- '''\n- active_str = ''\n- if active:\n- active_str = ' class=\"active\"'\n-\n- title = escape(title)\n- inner_str = title\n- if url:\n- inner_str = '<a href=\"{0}\">{1}</a>'.format(url, title)\n-\n- crumb = '<li{0}>{1}</li>'.format(active_str, inner_str)\n-\n- return crumb\n", "issue": "Deal with breadcrumbs\nAs Greg mentioned in #296, he hadn't liked the way breadcrumbs repeat current page title (or header) in their last element, for example: page \"Event 2015-05-25-something\" will have breadcrumbs \"Amy / All events / Event 2015-05-25-something\".\n\nI took a look at other big websites and how they do breadcrumbs and @gvwilson was right. They don't repeat current site at the end of breadcrumbs.\n\nThis means we'd only have breadcrumbs at most 3 links long: Amy / All \\* / \\* [ / action ], for example:\n\nWas:\n- Amy / All events / Event 2015-05-25-something / Edit\n\nWill be:\n- Amy / All events / Event 2015-05-25-something\n\nBut this does not bug me. In case of `All *` pages, we can just as well drop breadcrumbs (because they'd look like \"Amy / \").\n\nSo I don't really know what to do:\n1. Display breadcrumbs on the same pages as now, but hide the last item.\n2. Display breadcrumbs on the pages that would have more than 1 breadcrumbs.\n3. Drop breadcrumbs completely.\n\n", "before_files": [{"content": "import logging\n\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.utils.encoding import force_text\nfrom django.utils.html import escape\n\nregister = template.Library()\n_LOG = logging.getLogger(__name__)\n\n\[email protected]_tag\ndef breadcrumb(title, url):\n '''\n Create a simple anchor with provided text and already-resolved URL.\n Example usage:\n {% breadcrumb \"Title of breadcrumb\" resolved_url %}\n '''\n return create_crumb(title, url)\n\n\[email protected]_tag\ndef breadcrumb_url(title, url_name):\n '''\n Add non-active breadcrumb with specified title. 
Second argument should be\n a string name of URL that needs to be resolved.\n Example usage:\n {% breadcrumb_url \"Title of breadcrumb\" url_name %}\n '''\n url = reverse(url_name)\n return create_crumb(title, url)\n\n\[email protected]_tag\ndef breadcrumb_active(title):\n '''\n Add active breadcrumb, but not in an anchor.\n Example usage:\n {% breadcrumb_active \"Title of breadcrumb\" %}\n '''\n return create_crumb(str(title), url=None, active=True)\n\n\[email protected]_tag\ndef breadcrumb_index_all_objects(model):\n '''\n Add breadcrumb linking to the listing of all objects of specific type.\n This tag accepts both models or model instances as an argument.\n Example usage:\n {% breadcrumb_index_all_objects model %}\n {% breadcrumb_index_all_objects person %}\n '''\n plural = force_text(model._meta.verbose_name_plural)\n title = 'All {}'.format(plural)\n url_name = 'all_{}'.format(plural)\n url = reverse(url_name)\n return create_crumb(title, url)\n\n\[email protected]_tag\ndef breadcrumb_edit_object(obj):\n '''\n Add an active breadcrumb with the title \"Edit MODEL_NAME\".\n This tag accepts model instance as an argument.\n Example usage:\n {% breadcrumb_edit_object person %}\n '''\n singular = force_text(obj._meta.verbose_name)\n title = 'Edit {}'.format(singular)\n return create_crumb(title, url=None, active=True)\n\n\[email protected]_tag\ndef breadcrumb_new_object(model):\n '''\n Add an active breadcrumb with the title \"Add new MODEL_NAME\".\n This tag accepts model class as an argument.\n Example usage:\n {% breadcrumb_new_object person %}\n '''\n singular = force_text(model._meta.verbose_name)\n title = 'Add new {}'.format(singular)\n return create_crumb(title, url=None, active=True)\n\n\[email protected]_tag\ndef breadcrumb_object(obj):\n '''\n Add non-active breadcrumb with the title \"Add new MODEL_NAME\".\n This tag accepts model instance as an argument.\n Example usage:\n {% breadcrumb_object person %}\n '''\n title = str(obj)\n url = obj.get_absolute_url()\n return create_crumb(title, url, active=False)\n\n\[email protected]_tag\ndef breadcrumb_main_page():\n '''\n Special case of ``breadcrumb_url``. In all templates there's always a link\n to the main page so I wanted to save everyone thinking & writing by\n introducing this helper tag.\n Example usage:\n {% breadcrumb_main_page %}\n '''\n title = 'Amy'\n url = reverse('index')\n return create_crumb(title, url)\n\n\ndef create_crumb(title, url=None, active=False):\n '''\n Helper function that creates breadcrumb.\n '''\n active_str = ''\n if active:\n active_str = ' class=\"active\"'\n\n title = escape(title)\n inner_str = title\n if url:\n inner_str = '<a href=\"{0}\">{1}</a>'.format(url, title)\n\n crumb = '<li{0}>{1}</li>'.format(active_str, inner_str)\n\n return crumb\n", "path": "workshops/templatetags/breadcrumbs.py"}]}
1,888
951
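The golden diff above settles the breadcrumbs question by taking option 3 from the issue: the whole template-tag library is deleted rather than trimmed. For contrast, option 1 (keep the trail but hide the current page) could have been a small change to the active-crumb tag. The snippet below is a hypothetical sketch of that alternative, not code from the repository.

```python
from django import template

register = template.Library()

@register.simple_tag
def breadcrumb_active(title):
    # Hypothetical "option 1": render nothing for the current page, so the
    # trail shows only the parent pages and never repeats the page title.
    return ''
```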
gh_patches_debug_25587
rasdani/github-patches
git_diff
vllm-project__vllm-148
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Documentation on running basic python server and FastAPI server </issue> <code> [start of vllm/outputs.py] 1 from typing import Dict, List, Optional 2 3 from vllm.sequence import SequenceGroup, SequenceStatus 4 5 6 class CompletionOutput: 7 8 def __init__( 9 self, 10 index: int, 11 text: str, 12 token_ids: List[int], 13 cumulative_logprob: float, 14 logprobs: List[Dict[int, float]], 15 finish_reason: Optional[str] = None, 16 ) -> None: 17 self.index = index 18 self.text = text 19 self.token_ids = token_ids 20 self.cumulative_logprob = cumulative_logprob 21 self.logprobs = logprobs 22 self.finish_reason = finish_reason 23 24 def finished(self) -> bool: 25 return self.finish_reason is not None 26 27 def __repr__(self) -> str: 28 return (f"CompletionOutput(index={self.index}, " 29 f"text={self.text!r}, " 30 f"token_ids={self.token_ids}, " 31 f"cumulative_logprob={self.cumulative_logprob}, " 32 f"logprobs={self.logprobs}," 33 f"finish_reason={self.finish_reason})") 34 35 36 class RequestOutput: 37 38 def __init__( 39 self, 40 request_id: str, 41 prompt: str, 42 prompt_token_ids: List[int], 43 outputs: List[CompletionOutput], 44 ) -> None: 45 self.request_id = request_id 46 self.prompt = prompt 47 self.prompt_token_ids = prompt_token_ids 48 self.outputs = outputs 49 50 @classmethod 51 def from_seq_group(cls, seq_group: SequenceGroup) -> "RequestOutput": 52 # Get the top-n sequences. 53 n = seq_group.sampling_params.n 54 seqs = seq_group.get_seqs() 55 assert n <= len(seqs) 56 sorted_seqs = sorted( 57 seqs, key=lambda seq: seq.get_cumulative_logprob(), reverse=True) 58 top_n_seqs = sorted_seqs[:n] 59 60 # Create the outputs. 61 outputs: List[CompletionOutput] = [] 62 for seq in top_n_seqs: 63 logprobs = seq.output_logprobs 64 if seq_group.sampling_params.logprobs is None: 65 # NOTE: We need to take care of this case because the sequence 66 # always has the logprobs of the sampled tokens even if the 67 # logprobs are not requested. 68 logprobs = {} 69 finshed_reason = SequenceStatus.get_finished_reason(seq.status) 70 output = CompletionOutput(seqs.index(seq), seq.output_text, 71 seq.get_output_token_ids(), 72 seq.get_cumulative_logprob(), logprobs, 73 finshed_reason) 74 outputs.append(output) 75 76 # Every sequence in the sequence group should have the same prompt. 77 prompt = top_n_seqs[0].prompt 78 prompt_token_ids = top_n_seqs[0].data.prompt_token_ids 79 return cls(seq_group.request_id, prompt, prompt_token_ids, outputs) 80 81 def __repr__(self) -> str: 82 return (f"RequestOutput(request_id={self.request_id}, " 83 f"prompt={self.prompt!r}, " 84 f"prompt_token_ids={self.prompt_token_ids}, " 85 f"outputs={self.outputs})") 86 87 def finished(self) -> bool: 88 return all(output.finished() for output in self.outputs) 89 [end of vllm/outputs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vllm/outputs.py b/vllm/outputs.py --- a/vllm/outputs.py +++ b/vllm/outputs.py @@ -4,6 +4,18 @@ class CompletionOutput: + """The output data of one completion output of a request. + + Args: + index: The index of the output in the request. + text: The generated output text. + token_ids: The token IDs of the generated output text. + cumulative_logprob: The cumulative log probability of the generated + output text. + logprobs: The log probabilities of the top probability words at each + position if the logprobs are requested. + finish_reason: The reason why the sequence is finished. + """ def __init__( self, @@ -11,7 +23,7 @@ text: str, token_ids: List[int], cumulative_logprob: float, - logprobs: List[Dict[int, float]], + logprobs: Optional[List[Dict[int, float]]], finish_reason: Optional[str] = None, ) -> None: self.index = index @@ -34,7 +46,14 @@ class RequestOutput: + """The output data of a request to the LLM. + Args: + request_id: The unique ID of the request. + prompt: The prompt string of the request. + prompt_token_ids: The token IDs of the prompt. + outputs: The output sequences of the request. + """ def __init__( self, request_id: str,
{"golden_diff": "diff --git a/vllm/outputs.py b/vllm/outputs.py\n--- a/vllm/outputs.py\n+++ b/vllm/outputs.py\n@@ -4,6 +4,18 @@\n \n \n class CompletionOutput:\n+ \"\"\"The output data of one completion output of a request.\n+\n+ Args:\n+ index: The index of the output in the request.\n+ text: The generated output text.\n+ token_ids: The token IDs of the generated output text.\n+ cumulative_logprob: The cumulative log probability of the generated\n+ output text.\n+ logprobs: The log probabilities of the top probability words at each\n+ position if the logprobs are requested.\n+ finish_reason: The reason why the sequence is finished.\n+ \"\"\"\n \n def __init__(\n self,\n@@ -11,7 +23,7 @@\n text: str,\n token_ids: List[int],\n cumulative_logprob: float,\n- logprobs: List[Dict[int, float]],\n+ logprobs: Optional[List[Dict[int, float]]],\n finish_reason: Optional[str] = None,\n ) -> None:\n self.index = index\n@@ -34,7 +46,14 @@\n \n \n class RequestOutput:\n+ \"\"\"The output data of a request to the LLM.\n \n+ Args:\n+ request_id: The unique ID of the request.\n+ prompt: The prompt string of the request.\n+ prompt_token_ids: The token IDs of the prompt.\n+ outputs: The output sequences of the request.\n+ \"\"\"\n def __init__(\n self,\n request_id: str,\n", "issue": "Documentation on running basic python server and FastAPI server\n\n", "before_files": [{"content": "from typing import Dict, List, Optional\n\nfrom vllm.sequence import SequenceGroup, SequenceStatus\n\n\nclass CompletionOutput:\n\n def __init__(\n self,\n index: int,\n text: str,\n token_ids: List[int],\n cumulative_logprob: float,\n logprobs: List[Dict[int, float]],\n finish_reason: Optional[str] = None,\n ) -> None:\n self.index = index\n self.text = text\n self.token_ids = token_ids\n self.cumulative_logprob = cumulative_logprob\n self.logprobs = logprobs\n self.finish_reason = finish_reason\n\n def finished(self) -> bool:\n return self.finish_reason is not None\n\n def __repr__(self) -> str:\n return (f\"CompletionOutput(index={self.index}, \"\n f\"text={self.text!r}, \"\n f\"token_ids={self.token_ids}, \"\n f\"cumulative_logprob={self.cumulative_logprob}, \"\n f\"logprobs={self.logprobs},\"\n f\"finish_reason={self.finish_reason})\")\n\n\nclass RequestOutput:\n\n def __init__(\n self,\n request_id: str,\n prompt: str,\n prompt_token_ids: List[int],\n outputs: List[CompletionOutput],\n ) -> None:\n self.request_id = request_id\n self.prompt = prompt\n self.prompt_token_ids = prompt_token_ids\n self.outputs = outputs\n\n @classmethod\n def from_seq_group(cls, seq_group: SequenceGroup) -> \"RequestOutput\":\n # Get the top-n sequences.\n n = seq_group.sampling_params.n\n seqs = seq_group.get_seqs()\n assert n <= len(seqs)\n sorted_seqs = sorted(\n seqs, key=lambda seq: seq.get_cumulative_logprob(), reverse=True)\n top_n_seqs = sorted_seqs[:n]\n\n # Create the outputs.\n outputs: List[CompletionOutput] = []\n for seq in top_n_seqs:\n logprobs = seq.output_logprobs\n if seq_group.sampling_params.logprobs is None:\n # NOTE: We need to take care of this case because the sequence\n # always has the logprobs of the sampled tokens even if the\n # logprobs are not requested.\n logprobs = {}\n finshed_reason = SequenceStatus.get_finished_reason(seq.status)\n output = CompletionOutput(seqs.index(seq), seq.output_text,\n seq.get_output_token_ids(),\n seq.get_cumulative_logprob(), logprobs,\n finshed_reason)\n outputs.append(output)\n\n # Every sequence in the sequence group should have the same prompt.\n prompt = top_n_seqs[0].prompt\n 
prompt_token_ids = top_n_seqs[0].data.prompt_token_ids\n return cls(seq_group.request_id, prompt, prompt_token_ids, outputs)\n\n def __repr__(self) -> str:\n return (f\"RequestOutput(request_id={self.request_id}, \"\n f\"prompt={self.prompt!r}, \"\n f\"prompt_token_ids={self.prompt_token_ids}, \"\n f\"outputs={self.outputs})\")\n\n def finished(self) -> bool:\n return all(output.finished() for output in self.outputs)\n", "path": "vllm/outputs.py"}]}
1,415
361
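The diff above only adds docstrings, so a usage sketch helps show what the documented objects look like to a caller. The snippet assumes vLLM's offline `LLM` entry point and uses a placeholder model name; neither detail comes from the record itself.

```python
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m")           # placeholder model name
params = SamplingParams(n=2, logprobs=1)

for request_output in llm.generate(["Hello, my name is"], params):
    print(request_output.prompt)                # RequestOutput fields
    for completion in request_output.outputs:   # one CompletionOutput per n
        print(completion.index, completion.finish_reason, completion.text)
```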
gh_patches_debug_22017
rasdani/github-patches
git_diff
facebookresearch__CompilerGym-563
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `statistics.py` wrong parameter name ## 🐛 Bug The functions [here](https://github.com/facebookresearch/CompilerGym/blob/e248330d2475fbcdf473cc3df951f25b5eaf4945/compiler_gym/util/statistics.py#L8) says they take `iterable` as inputs. However, `np.asarray` actually take `array_like`. [Quote: ](https://numpy.org/doc/stable/reference/generated/numpy.asarray.html) > Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. e.g. ```python geometric_mean(i for i in range(10)) ``` This will fail because though it's an `iterable`, it's not an `array_like`. </issue> <code> [start of compiler_gym/util/statistics.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the MIT license found in the 4 # LICENSE file in the root directory of this source tree. 5 import numpy as np 6 7 8 def geometric_mean(iterable): 9 """Zero-length-safe geometric mean.""" 10 values = np.asarray(iterable) 11 if not values.size: 12 return 0 13 # Shortcut to return 0 when any element of the input is not positive. 14 if not np.all(values > 0): 15 return 0 16 a = np.log(values) 17 return np.exp(a.sum() / len(a)) 18 19 20 def arithmetic_mean(iterable): 21 """Zero-length-safe arithmetic mean.""" 22 values = np.asarray(iterable) 23 if not values.size: 24 return 0 25 return values.mean() 26 27 28 def stdev(iterable): 29 """Zero-length-safe standard deviation.""" 30 values = np.asarray(iterable) 31 if not values.size: 32 return 0 33 return values.std() 34 [end of compiler_gym/util/statistics.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/compiler_gym/util/statistics.py b/compiler_gym/util/statistics.py --- a/compiler_gym/util/statistics.py +++ b/compiler_gym/util/statistics.py @@ -5,9 +5,9 @@ import numpy as np -def geometric_mean(iterable): +def geometric_mean(array_like): """Zero-length-safe geometric mean.""" - values = np.asarray(iterable) + values = np.asarray(array_like) if not values.size: return 0 # Shortcut to return 0 when any element of the input is not positive. @@ -17,17 +17,17 @@ return np.exp(a.sum() / len(a)) -def arithmetic_mean(iterable): +def arithmetic_mean(array_like): """Zero-length-safe arithmetic mean.""" - values = np.asarray(iterable) + values = np.asarray(array_like) if not values.size: return 0 return values.mean() -def stdev(iterable): +def stdev(array_like): """Zero-length-safe standard deviation.""" - values = np.asarray(iterable) + values = np.asarray(array_like) if not values.size: return 0 return values.std()
{"golden_diff": "diff --git a/compiler_gym/util/statistics.py b/compiler_gym/util/statistics.py\n--- a/compiler_gym/util/statistics.py\n+++ b/compiler_gym/util/statistics.py\n@@ -5,9 +5,9 @@\n import numpy as np\n \n \n-def geometric_mean(iterable):\n+def geometric_mean(array_like):\n \"\"\"Zero-length-safe geometric mean.\"\"\"\n- values = np.asarray(iterable)\n+ values = np.asarray(array_like)\n if not values.size:\n return 0\n # Shortcut to return 0 when any element of the input is not positive.\n@@ -17,17 +17,17 @@\n return np.exp(a.sum() / len(a))\n \n \n-def arithmetic_mean(iterable):\n+def arithmetic_mean(array_like):\n \"\"\"Zero-length-safe arithmetic mean.\"\"\"\n- values = np.asarray(iterable)\n+ values = np.asarray(array_like)\n if not values.size:\n return 0\n return values.mean()\n \n \n-def stdev(iterable):\n+def stdev(array_like):\n \"\"\"Zero-length-safe standard deviation.\"\"\"\n- values = np.asarray(iterable)\n+ values = np.asarray(array_like)\n if not values.size:\n return 0\n return values.std()\n", "issue": "`statistics.py` wrong parameter name\n## \ud83d\udc1b Bug\r\n\r\nThe functions [here](https://github.com/facebookresearch/CompilerGym/blob/e248330d2475fbcdf473cc3df951f25b5eaf4945/compiler_gym/util/statistics.py#L8) says they take `iterable` as inputs. However, `np.asarray` actually take `array_like`.\r\n\r\n[Quote:\r\n](https://numpy.org/doc/stable/reference/generated/numpy.asarray.html)\r\n\r\n> Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays.\r\n\r\ne.g.\r\n```python\r\ngeometric_mean(i for i in range(10))\r\n```\r\nThis will fail because though it's an `iterable`, it's not an `array_like`.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport numpy as np\n\n\ndef geometric_mean(iterable):\n \"\"\"Zero-length-safe geometric mean.\"\"\"\n values = np.asarray(iterable)\n if not values.size:\n return 0\n # Shortcut to return 0 when any element of the input is not positive.\n if not np.all(values > 0):\n return 0\n a = np.log(values)\n return np.exp(a.sum() / len(a))\n\n\ndef arithmetic_mean(iterable):\n \"\"\"Zero-length-safe arithmetic mean.\"\"\"\n values = np.asarray(iterable)\n if not values.size:\n return 0\n return values.mean()\n\n\ndef stdev(iterable):\n \"\"\"Zero-length-safe standard deviation.\"\"\"\n values = np.asarray(iterable)\n if not values.size:\n return 0\n return values.std()\n", "path": "compiler_gym/util/statistics.py"}]}
1,002
261
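The rename matches NumPy's own terminology: `np.asarray` expects array_like input, and a generator, while iterable, is not array_like. A short standalone check of that behaviour, written here independently of CompilerGym:

```python
import numpy as np

# A generator is iterable but not array_like: np.asarray wraps the generator
# object itself in a zero-dimensional object array instead of reading values.
wrapped = np.asarray(i for i in range(10))
print(wrapped.shape, wrapped.dtype)                  # () object

# Materialising the values first gives the intended geometric mean.
values = np.asarray([1, 2, 4, 8])
print(np.exp(np.log(values).sum() / len(values)))    # 2.828..., i.e. 8**0.5
```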
gh_patches_debug_23027
rasdani/github-patches
git_diff
mirumee__ariadne-172
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unbound enum values are None when used in arguments When used as a mutation input, enum parameter should be `str`, but actually is `None`. ```python def test_executing_mutation_takes_enum(): type_defs = """ type Query { _: String } type Mutation { eat(meal: Meal!): Int! } enum Meal { SPAM } """ mutation = MutationType() @mutation.field("eat") def resolve_eat(*_, meal): # pylint: disable=unused-variable assert meal == "SPAM" return 42 schema = make_executable_schema(type_defs, mutation) result = graphql_sync(schema, 'mutation { eat(meal: SPAM) }') assert result.errors is None assert result.data == {"eat": 42} ``` </issue> <code> [start of ariadne/enums.py] 1 import enum 2 3 from typing import Any, Dict, Optional, Union, cast 4 5 from graphql.type import GraphQLEnumType, GraphQLNamedType, GraphQLSchema 6 7 from .types import SchemaBindable 8 9 10 class EnumType(SchemaBindable): 11 def __init__( 12 self, name: str, values=Union[Dict[str, Any], enum.Enum, enum.IntEnum] 13 ) -> None: 14 self.name = name 15 try: 16 self.values = values.__members__ # pylint: disable=no-member 17 except AttributeError: 18 self.values = values 19 20 def bind_to_schema(self, schema: GraphQLSchema) -> None: 21 graphql_type = schema.type_map.get(self.name) 22 self.validate_graphql_type(graphql_type) 23 graphql_type = cast(GraphQLEnumType, graphql_type) 24 25 for key, value in self.values.items(): 26 if key not in graphql_type.values: 27 raise ValueError( 28 "Value %s is not defined on enum %s" % (key, self.name) 29 ) 30 graphql_type.values[key].value = value 31 32 def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None: 33 if not graphql_type: 34 raise ValueError("Enum %s is not defined in the schema" % self.name) 35 if not isinstance(graphql_type, GraphQLEnumType): 36 raise ValueError( 37 "%s is defined in the schema, but it is instance of %s (expected %s)" 38 % (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__) 39 ) 40 [end of ariadne/enums.py] [start of ariadne/executable_schema.py] 1 from typing import List, Union 2 3 from graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse 4 5 from .types import SchemaBindable 6 7 8 def make_executable_schema( 9 type_defs: Union[str, List[str]], 10 bindables: Union[SchemaBindable, List[SchemaBindable], None] = None, 11 ) -> GraphQLSchema: 12 if isinstance(type_defs, list): 13 type_defs = join_type_defs(type_defs) 14 15 ast_document = parse(type_defs) 16 schema = build_and_extend_schema(ast_document) 17 18 if isinstance(bindables, list): 19 for obj in bindables: 20 obj.bind_to_schema(schema) 21 elif bindables: 22 bindables.bind_to_schema(schema) 23 24 return schema 25 26 27 def join_type_defs(type_defs: List[str]) -> str: 28 return "\n\n".join(t.strip() for t in type_defs) 29 30 31 def build_and_extend_schema(ast: DocumentNode) -> GraphQLSchema: 32 schema = build_ast_schema(ast) 33 extension_ast = extract_extensions(ast) 34 35 if extension_ast.definitions: 36 schema = extend_schema(schema, extension_ast) 37 38 return schema 39 40 41 EXTENSION_KINDS = [ 42 "scalar_type_extension", 43 "object_type_extension", 44 "interface_type_extension", 45 "union_type_extension", 46 "enum_type_extension", 47 "input_object_type_extension", 48 ] 49 50 51 def extract_extensions(ast: DocumentNode) -> DocumentNode: 52 extensions = [node for node in ast.definitions if node.kind in EXTENSION_KINDS] 53 return 
DocumentNode(definitions=extensions) 54 [end of ariadne/executable_schema.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ariadne/enums.py b/ariadne/enums.py --- a/ariadne/enums.py +++ b/ariadne/enums.py @@ -37,3 +37,15 @@ "%s is defined in the schema, but it is instance of %s (expected %s)" % (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__) ) + + +def set_default_enum_values_on_schema(schema: GraphQLSchema): + for type_object in schema.type_map.values(): + if isinstance(type_object, GraphQLEnumType): + set_default_enum_values(type_object) + + +def set_default_enum_values(graphql_type: GraphQLEnumType): + for key in graphql_type.values: + if graphql_type.values[key].value is None: + graphql_type.values[key].value = key diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py --- a/ariadne/executable_schema.py +++ b/ariadne/executable_schema.py @@ -2,6 +2,7 @@ from graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse +from .enums import set_default_enum_values_on_schema from .types import SchemaBindable @@ -21,6 +22,8 @@ elif bindables: bindables.bind_to_schema(schema) + set_default_enum_values_on_schema(schema) + return schema
{"golden_diff": "diff --git a/ariadne/enums.py b/ariadne/enums.py\n--- a/ariadne/enums.py\n+++ b/ariadne/enums.py\n@@ -37,3 +37,15 @@\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__)\n )\n+\n+\n+def set_default_enum_values_on_schema(schema: GraphQLSchema):\n+ for type_object in schema.type_map.values():\n+ if isinstance(type_object, GraphQLEnumType):\n+ set_default_enum_values(type_object)\n+\n+\n+def set_default_enum_values(graphql_type: GraphQLEnumType):\n+ for key in graphql_type.values:\n+ if graphql_type.values[key].value is None:\n+ graphql_type.values[key].value = key\ndiff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py\n--- a/ariadne/executable_schema.py\n+++ b/ariadne/executable_schema.py\n@@ -2,6 +2,7 @@\n \n from graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse\n \n+from .enums import set_default_enum_values_on_schema\n from .types import SchemaBindable\n \n \n@@ -21,6 +22,8 @@\n elif bindables:\n bindables.bind_to_schema(schema)\n \n+ set_default_enum_values_on_schema(schema)\n+\n return schema\n", "issue": "Unbound enum values are None when used in arguments\nWhen used as a mutation input, enum parameter should be `str`, but actually is `None`.\r\n\r\n```python\r\ndef test_executing_mutation_takes_enum():\r\n type_defs = \"\"\"\r\n type Query {\r\n _: String\r\n }\r\n\r\n type Mutation {\r\n eat(meal: Meal!): Int!\r\n }\r\n\r\n enum Meal {\r\n SPAM\r\n }\r\n \"\"\"\r\n\r\n mutation = MutationType()\r\n\r\n @mutation.field(\"eat\")\r\n def resolve_eat(*_, meal): # pylint: disable=unused-variable\r\n assert meal == \"SPAM\"\r\n return 42\r\n\r\n schema = make_executable_schema(type_defs, mutation)\r\n\r\n result = graphql_sync(schema, 'mutation { eat(meal: SPAM) }')\r\n assert result.errors is None\r\n assert result.data == {\"eat\": 42}\r\n```\n", "before_files": [{"content": "import enum\n\nfrom typing import Any, Dict, Optional, Union, cast\n\nfrom graphql.type import GraphQLEnumType, GraphQLNamedType, GraphQLSchema\n\nfrom .types import SchemaBindable\n\n\nclass EnumType(SchemaBindable):\n def __init__(\n self, name: str, values=Union[Dict[str, Any], enum.Enum, enum.IntEnum]\n ) -> None:\n self.name = name\n try:\n self.values = values.__members__ # pylint: disable=no-member\n except AttributeError:\n self.values = values\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLEnumType, graphql_type)\n\n for key, value in self.values.items():\n if key not in graphql_type.values:\n raise ValueError(\n \"Value %s is not defined on enum %s\" % (key, self.name)\n )\n graphql_type.values[key].value = value\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Enum %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLEnumType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__)\n )\n", "path": "ariadne/enums.py"}, {"content": "from typing import List, Union\n\nfrom graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse\n\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n bindables: Union[SchemaBindable, 
List[SchemaBindable], None] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_and_extend_schema(ast_document)\n\n if isinstance(bindables, list):\n for obj in bindables:\n obj.bind_to_schema(schema)\n elif bindables:\n bindables.bind_to_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n\n\ndef build_and_extend_schema(ast: DocumentNode) -> GraphQLSchema:\n schema = build_ast_schema(ast)\n extension_ast = extract_extensions(ast)\n\n if extension_ast.definitions:\n schema = extend_schema(schema, extension_ast)\n\n return schema\n\n\nEXTENSION_KINDS = [\n \"scalar_type_extension\",\n \"object_type_extension\",\n \"interface_type_extension\",\n \"union_type_extension\",\n \"enum_type_extension\",\n \"input_object_type_extension\",\n]\n\n\ndef extract_extensions(ast: DocumentNode) -> DocumentNode:\n extensions = [node for node in ast.definitions if node.kind in EXTENSION_KINDS]\n return DocumentNode(definitions=extensions)\n", "path": "ariadne/executable_schema.py"}]}
1,577
330
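The core of the fix is that any enum value still unbound after schema binding falls back to its own name, so a resolver receives the string "SPAM" rather than None. Below is a standalone sketch of that fallback, assuming graphql-core 3's `build_schema`; it mirrors the patch's helper but is not ariadne code.

```python
from graphql import GraphQLEnumType, build_schema

schema = build_schema("""
    type Query { _: String }
    enum Meal { SPAM }
""")

# Same idea as set_default_enum_values_on_schema in the patch: give every
# unbound (None) enum value its own name as the Python-side value.
for type_object in schema.type_map.values():
    if isinstance(type_object, GraphQLEnumType):
        for key, enum_value in type_object.values.items():
            if enum_value.value is None:
                enum_value.value = key

assert schema.type_map["Meal"].values["SPAM"].value == "SPAM"
```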
gh_patches_debug_11707
rasdani/github-patches
git_diff
elastic__ecs-1164
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Incorrect output of the "tracing" fields in the Beats yml file Just like the `base` fields, the `tracing` fields are not nested under the name of the field set. So it's not `base.@timestamp`, it's `@timestamp`, and it's not `tracing.trace.id`, it's `trace.id`. In the Beats field yaml file the ECS project generates, the tracing fields are incorrectly nested under a `tracing` section, which means Beats interprets the field names incorrectly (`tracing.trace.id`). This is a bug, these fields shouldn't be nested this way. In order to fix this issue, we should remove this nesting in the Beats yml output. Just like `@timestamp` and other base fields are not nested under a field group. I think this bug fix will be at minimum backported to 1.7. Thoughts welcome on this, is there a need to backport to 1.6 as well? The Beats PR https://github.com/elastic/beats/pull/22571 to import ECS 1.7 should be adjusted with these changes, once the bug fix is ready. cc @andrewstucki </issue> <code> [start of scripts/generators/beats.py] 1 from os.path import join 2 from collections import OrderedDict 3 from generators import ecs_helpers 4 5 6 def generate(ecs_nested, ecs_version, out_dir): 7 # Load temporary whitelist for default_fields workaround. 8 df_whitelist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_whitelist.yml') 9 10 # base first 11 beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_whitelist, ecs_nested['base']['prefix']) 12 13 allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type'] 14 # other fieldsets 15 for fieldset_name in sorted(ecs_nested): 16 if 'base' == fieldset_name: 17 continue 18 fieldset = ecs_nested[fieldset_name] 19 20 beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys) 21 beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix']) 22 beats_fields.append(beats_field) 23 24 beats_file = OrderedDict() 25 beats_file['key'] = 'ecs' 26 beats_file['title'] = 'ECS' 27 beats_file['description'] = 'ECS Fields.' 28 beats_file['fields'] = beats_fields 29 30 write_beats_yaml(beats_file, ecs_version, out_dir) 31 32 33 def fieldset_field_array(source_fields, df_whitelist, fieldset_prefix): 34 allowed_keys = ['name', 'level', 'required', 'type', 'object_type', 35 'ignore_above', 'multi_fields', 'format', 'input_format', 36 'output_format', 'output_precision', 'description', 37 'example', 'enabled', 'index', 'path', 'scaling_factor'] 38 multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above'] 39 40 fields = [] 41 for nested_field_name in source_fields: 42 ecs_field = source_fields[nested_field_name] 43 beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys) 44 if '' == fieldset_prefix: 45 contextual_name = nested_field_name 46 else: 47 contextual_name = '.'.join(nested_field_name.split('.')[1:]) 48 49 cleaned_multi_fields = [] 50 if 'multi_fields' in ecs_field: 51 for mf in ecs_field['multi_fields']: 52 # Set default_field if necessary. Avoid adding the key if the parent 53 # field already is marked with default_field: false. 
54 if not mf['flat_name'] in df_whitelist and ecs_field['flat_name'] in df_whitelist: 55 mf['default_field'] = False 56 cleaned_multi_fields.append( 57 ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys)) 58 beats_field['multi_fields'] = cleaned_multi_fields 59 60 beats_field['name'] = contextual_name 61 62 if not ecs_field['flat_name'] in df_whitelist: 63 beats_field['default_field'] = False 64 65 fields.append(beats_field) 66 return sorted(fields, key=lambda x: x['name']) 67 68 # Helpers 69 70 71 def write_beats_yaml(beats_file, ecs_version, out_dir): 72 ecs_helpers.make_dirs(join(out_dir, 'beats')) 73 warning = file_header().format(version=ecs_version) 74 ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning) 75 76 77 # Templates 78 79 80 def file_header(): 81 return ''' 82 # WARNING! Do not edit this file directly, it was generated by the ECS project, 83 # based on ECS version {version}. 84 # Please visit https://github.com/elastic/ecs to suggest changes to ECS fields. 85 86 '''.lstrip() 87 [end of scripts/generators/beats.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py --- a/scripts/generators/beats.py +++ b/scripts/generators/beats.py @@ -17,6 +17,11 @@ continue fieldset = ecs_nested[fieldset_name] + # Handle when `root:true` + if fieldset.get('root', False): + beats_fields.extend(fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix'])) + continue + beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys) beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix']) beats_fields.append(beats_field)
{"golden_diff": "diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py\n--- a/scripts/generators/beats.py\n+++ b/scripts/generators/beats.py\n@@ -17,6 +17,11 @@\n continue\n fieldset = ecs_nested[fieldset_name]\n \n+ # Handle when `root:true`\n+ if fieldset.get('root', False):\n+ beats_fields.extend(fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix']))\n+ continue\n+\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix'])\n beats_fields.append(beats_field)\n", "issue": "Incorrect output of the \"tracing\" fields in the Beats yml file\nJust like the `base` fields, the `tracing` fields are not nested under the name of the field set. So it's not `base.@timestamp`, it's `@timestamp`, and it's not `tracing.trace.id`, it's `trace.id`.\r\n\r\nIn the Beats field yaml file the ECS project generates, the tracing fields are incorrectly nested under a `tracing` section, which means Beats interprets the field names incorrectly (`tracing.trace.id`).\r\n\r\nThis is a bug, these fields shouldn't be nested this way.\r\n\r\nIn order to fix this issue, we should remove this nesting in the Beats yml output. Just like `@timestamp` and other base fields are not nested under a field group.\r\n\r\nI think this bug fix will be at minimum backported to 1.7. Thoughts welcome on this, is there a need to backport to 1.6 as well?\r\n\r\nThe Beats PR https://github.com/elastic/beats/pull/22571 to import ECS 1.7 should be adjusted with these changes, once the bug fix is ready. cc @andrewstucki \r\n\n", "before_files": [{"content": "from os.path import join\nfrom collections import OrderedDict\nfrom generators import ecs_helpers\n\n\ndef generate(ecs_nested, ecs_version, out_dir):\n # Load temporary whitelist for default_fields workaround.\n df_whitelist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_whitelist.yml')\n\n # base first\n beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_whitelist, ecs_nested['base']['prefix'])\n\n allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']\n # other fieldsets\n for fieldset_name in sorted(ecs_nested):\n if 'base' == fieldset_name:\n continue\n fieldset = ecs_nested[fieldset_name]\n\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix'])\n beats_fields.append(beats_field)\n\n beats_file = OrderedDict()\n beats_file['key'] = 'ecs'\n beats_file['title'] = 'ECS'\n beats_file['description'] = 'ECS Fields.'\n beats_file['fields'] = beats_fields\n\n write_beats_yaml(beats_file, ecs_version, out_dir)\n\n\ndef fieldset_field_array(source_fields, df_whitelist, fieldset_prefix):\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n 'example', 'enabled', 'index', 'path', 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n\n fields = []\n for nested_field_name in source_fields:\n ecs_field = source_fields[nested_field_name]\n beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)\n if '' == fieldset_prefix:\n contextual_name = nested_field_name\n else:\n contextual_name = 
'.'.join(nested_field_name.split('.')[1:])\n\n cleaned_multi_fields = []\n if 'multi_fields' in ecs_field:\n for mf in ecs_field['multi_fields']:\n # Set default_field if necessary. Avoid adding the key if the parent\n # field already is marked with default_field: false.\n if not mf['flat_name'] in df_whitelist and ecs_field['flat_name'] in df_whitelist:\n mf['default_field'] = False\n cleaned_multi_fields.append(\n ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))\n beats_field['multi_fields'] = cleaned_multi_fields\n\n beats_field['name'] = contextual_name\n\n if not ecs_field['flat_name'] in df_whitelist:\n beats_field['default_field'] = False\n\n fields.append(beats_field)\n return sorted(fields, key=lambda x: x['name'])\n\n# Helpers\n\n\ndef write_beats_yaml(beats_file, ecs_version, out_dir):\n ecs_helpers.make_dirs(join(out_dir, 'beats'))\n warning = file_header().format(version=ecs_version)\n ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)\n\n\n# Templates\n\n\ndef file_header():\n return '''\n# WARNING! Do not edit this file directly, it was generated by the ECS project,\n# based on ECS version {version}.\n# Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.\n\n'''.lstrip()\n", "path": "scripts/generators/beats.py"}]}
1,740
167
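The patch flattens fieldsets marked root:true into the top level of the generated fields.ecs.yml instead of nesting them under a group. A toy illustration of the naming consequence, using a made-up helper rather than the real generator:

```python
def beats_name(nested_field_name, group=None):
    # Field name relative to its fieldset, the way fieldset_field_array
    # derives it, optionally re-nested under a Beats group.
    relative = ".".join(nested_field_name.split(".")[1:])
    return f"{group}.{relative}" if group else relative

# Nested under a "tracing" group (the buggy output) Beats addresses the field
# as "tracing.trace.id"; flattened at the root, as the patch does for
# root:true fieldsets, the same entry is just "trace.id".
print(beats_name("tracing.trace.id", group="tracing"))   # tracing.trace.id
print(beats_name("tracing.trace.id"))                     # trace.id
```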
gh_patches_debug_34022
rasdani/github-patches
git_diff
scrapy__scrapy-594
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> engine status util references removed engine.slots attribute ``` Traceback (most recent call last): Less File "/usr/lib/pymodules/python2.7/scrapy/xlib/pydispatch/robustapply.py", line 54, in robustApply return receiver(*arguments, **named) File "/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py", line 63, in engine_started tsk.start(60.0, now=True) File "/usr/lib/python2.7/dist-packages/twisted/internet/task.py", line 163, in start self() File "/usr/lib/python2.7/dist-packages/twisted/internet/task.py", line 208, in __call__ d = defer.maybeDeferred(self.f, *self.a, **self.kw) --- <exception caught here> --- File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 134, in maybeDeferred result = f(*args, **kw) File "/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py", line 103, in _check_warning self._send_report(self.notify_mails, subj) File "/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py", line 116, in _send_report s += pformat(get_engine_status(self.crawler.engine)) File "/usr/lib/pymodules/python2.7/scrapy/utils/engine.py", line 33, in get_engine_status for spider in engine.slots.keys(): exceptions.AttributeError: 'ExecutionEngine' object has no attribute 'slots' ``` </issue> <code> [start of scrapy/utils/engine.py] 1 """Some debugging functions for working with the Scrapy engine""" 2 3 from __future__ import print_function 4 from time import time # used in global tests code 5 6 def get_engine_status(engine): 7 """Return a report of the current engine status""" 8 global_tests = [ 9 "time()-engine.start_time", 10 "engine.has_capacity()", 11 "len(engine.downloader.active)", 12 "engine.scraper.is_idle()", 13 ] 14 spider_tests = [ 15 "engine.spider_is_idle(spider)", 16 "engine.slot.closing", 17 "len(engine.slot.inprogress)", 18 "len(engine.slot.scheduler.dqs or [])", 19 "len(engine.slot.scheduler.mqs)", 20 "len(engine.scraper.slot.queue)", 21 "len(engine.scraper.slot.active)", 22 "engine.scraper.slot.active_size", 23 "engine.scraper.slot.itemproc_size", 24 "engine.scraper.slot.needs_backout()", 25 ] 26 27 status = {'global': [], 'spiders': {}} 28 for test in global_tests: 29 try: 30 status['global'] += [(test, eval(test))] 31 except Exception as e: 32 status['global'] += [(test, "%s (exception)" % type(e).__name__)] 33 for spider in engine.slots.keys(): 34 x = [] 35 for test in spider_tests: 36 try: 37 x += [(test, eval(test))] 38 except Exception as e: 39 x += [(test, "%s (exception)" % type(e).__name__)] 40 status['spiders'][spider] = x 41 return status 42 43 def format_engine_status(engine=None): 44 status = get_engine_status(engine) 45 s = "Execution engine status\n\n" 46 for test, result in status['global']: 47 s += "%-47s : %s\n" % (test, result) 48 s += "\n" 49 for spider, tests in status['spiders'].items(): 50 s += "Spider: %s\n" % spider 51 for test, result in tests: 52 s += " %-50s : %s\n" % (test, result) 53 return s 54 55 def print_engine_status(engine): 56 print(format_engine_status(engine)) 57 58 [end of scrapy/utils/engine.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/utils/engine.py b/scrapy/utils/engine.py --- a/scrapy/utils/engine.py +++ b/scrapy/utils/engine.py @@ -5,14 +5,13 @@ def get_engine_status(engine): """Return a report of the current engine status""" - global_tests = [ + tests = [ "time()-engine.start_time", "engine.has_capacity()", "len(engine.downloader.active)", "engine.scraper.is_idle()", - ] - spider_tests = [ - "engine.spider_is_idle(spider)", + "engine.spider.name", + "engine.spider_is_idle(engine.spider)", "engine.slot.closing", "len(engine.slot.inprogress)", "len(engine.slot.scheduler.dqs or [])", @@ -24,34 +23,23 @@ "engine.scraper.slot.needs_backout()", ] - status = {'global': [], 'spiders': {}} - for test in global_tests: + checks = [] + for test in tests: try: - status['global'] += [(test, eval(test))] + checks += [(test, eval(test))] except Exception as e: - status['global'] += [(test, "%s (exception)" % type(e).__name__)] - for spider in engine.slots.keys(): - x = [] - for test in spider_tests: - try: - x += [(test, eval(test))] - except Exception as e: - x += [(test, "%s (exception)" % type(e).__name__)] - status['spiders'][spider] = x - return status + checks += [(test, "%s (exception)" % type(e).__name__)] + + return checks def format_engine_status(engine=None): - status = get_engine_status(engine) + checks = get_engine_status(engine) s = "Execution engine status\n\n" - for test, result in status['global']: + for test, result in checks: s += "%-47s : %s\n" % (test, result) s += "\n" - for spider, tests in status['spiders'].items(): - s += "Spider: %s\n" % spider - for test, result in tests: - s += " %-50s : %s\n" % (test, result) + return s def print_engine_status(engine): print(format_engine_status(engine)) -
{"golden_diff": "diff --git a/scrapy/utils/engine.py b/scrapy/utils/engine.py\n--- a/scrapy/utils/engine.py\n+++ b/scrapy/utils/engine.py\n@@ -5,14 +5,13 @@\n \n def get_engine_status(engine):\n \"\"\"Return a report of the current engine status\"\"\"\n- global_tests = [\n+ tests = [\n \"time()-engine.start_time\",\n \"engine.has_capacity()\",\n \"len(engine.downloader.active)\",\n \"engine.scraper.is_idle()\",\n- ]\n- spider_tests = [\n- \"engine.spider_is_idle(spider)\",\n+ \"engine.spider.name\",\n+ \"engine.spider_is_idle(engine.spider)\",\n \"engine.slot.closing\",\n \"len(engine.slot.inprogress)\",\n \"len(engine.slot.scheduler.dqs or [])\",\n@@ -24,34 +23,23 @@\n \"engine.scraper.slot.needs_backout()\",\n ]\n \n- status = {'global': [], 'spiders': {}}\n- for test in global_tests:\n+ checks = []\n+ for test in tests:\n try:\n- status['global'] += [(test, eval(test))]\n+ checks += [(test, eval(test))]\n except Exception as e:\n- status['global'] += [(test, \"%s (exception)\" % type(e).__name__)]\n- for spider in engine.slots.keys():\n- x = []\n- for test in spider_tests:\n- try:\n- x += [(test, eval(test))]\n- except Exception as e:\n- x += [(test, \"%s (exception)\" % type(e).__name__)]\n- status['spiders'][spider] = x\n- return status\n+ checks += [(test, \"%s (exception)\" % type(e).__name__)]\n+\n+ return checks\n \n def format_engine_status(engine=None):\n- status = get_engine_status(engine)\n+ checks = get_engine_status(engine)\n s = \"Execution engine status\\n\\n\"\n- for test, result in status['global']:\n+ for test, result in checks:\n s += \"%-47s : %s\\n\" % (test, result)\n s += \"\\n\"\n- for spider, tests in status['spiders'].items():\n- s += \"Spider: %s\\n\" % spider\n- for test, result in tests:\n- s += \" %-50s : %s\\n\" % (test, result)\n+\n return s\n \n def print_engine_status(engine):\n print(format_engine_status(engine))\n-\n", "issue": "engine status util references removed engine.slots attribute\n```\nTraceback (most recent call last): Less\n File \"/usr/lib/pymodules/python2.7/scrapy/xlib/pydispatch/robustapply.py\", line 54, in robustApply\n return receiver(*arguments, **named)\n File \"/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py\", line 63, in engine_started\n tsk.start(60.0, now=True)\n File \"/usr/lib/python2.7/dist-packages/twisted/internet/task.py\", line 163, in start\n self()\n File \"/usr/lib/python2.7/dist-packages/twisted/internet/task.py\", line 208, in __call__\n d = defer.maybeDeferred(self.f, *self.a, **self.kw)\n --- <exception caught here> ---\n File \"/usr/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 134, in maybeDeferred\n result = f(*args, **kw)\n File \"/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py\", line 103, in _check_warning\n self._send_report(self.notify_mails, subj)\n File \"/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py\", line 116, in _send_report\n s += pformat(get_engine_status(self.crawler.engine))\n File \"/usr/lib/pymodules/python2.7/scrapy/utils/engine.py\", line 33, in get_engine_status\n for spider in engine.slots.keys():\n exceptions.AttributeError: 'ExecutionEngine' object has no attribute 'slots'\n```\n\n", "before_files": [{"content": "\"\"\"Some debugging functions for working with the Scrapy engine\"\"\"\n\nfrom __future__ import print_function\nfrom time import time # used in global tests code\n\ndef get_engine_status(engine):\n \"\"\"Return a report of the current engine status\"\"\"\n global_tests = [\n \"time()-engine.start_time\",\n \"engine.has_capacity()\",\n 
\"len(engine.downloader.active)\",\n \"engine.scraper.is_idle()\",\n ]\n spider_tests = [\n \"engine.spider_is_idle(spider)\",\n \"engine.slot.closing\",\n \"len(engine.slot.inprogress)\",\n \"len(engine.slot.scheduler.dqs or [])\",\n \"len(engine.slot.scheduler.mqs)\",\n \"len(engine.scraper.slot.queue)\",\n \"len(engine.scraper.slot.active)\",\n \"engine.scraper.slot.active_size\",\n \"engine.scraper.slot.itemproc_size\",\n \"engine.scraper.slot.needs_backout()\",\n ]\n\n status = {'global': [], 'spiders': {}}\n for test in global_tests:\n try:\n status['global'] += [(test, eval(test))]\n except Exception as e:\n status['global'] += [(test, \"%s (exception)\" % type(e).__name__)]\n for spider in engine.slots.keys():\n x = []\n for test in spider_tests:\n try:\n x += [(test, eval(test))]\n except Exception as e:\n x += [(test, \"%s (exception)\" % type(e).__name__)]\n status['spiders'][spider] = x\n return status\n\ndef format_engine_status(engine=None):\n status = get_engine_status(engine)\n s = \"Execution engine status\\n\\n\"\n for test, result in status['global']:\n s += \"%-47s : %s\\n\" % (test, result)\n s += \"\\n\"\n for spider, tests in status['spiders'].items():\n s += \"Spider: %s\\n\" % spider\n for test, result in tests:\n s += \" %-50s : %s\\n\" % (test, result)\n return s\n\ndef print_engine_status(engine):\n print(format_engine_status(engine))\n\n", "path": "scrapy/utils/engine.py"}]}
1,483
552
gh_patches_debug_26859
rasdani/github-patches
git_diff
SeldonIO__MLServer-850
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> MLServer to hide http health request logs to avoid polluting the logs As part of the Seldon Core addition https://github.com/SeldonIO/seldon-core/pull/4028 which moves the TCP ready checks into proper HTTP request ready checks to `v2/health/ready` there is now a lot of noise from the readiness checks every 5 seconds. We should explore ways in which we avoid this noise, perhaps making it completely silent by default, or eventually once the prometheus server is created on a separate server this could also be added (And both of them could be muted) ![image](https://user-images.githubusercontent.com/1447507/161749439-3e96c417-a630-4208-80d6-5fc5f262fcd3.png) </issue> <code> [start of mlserver/rest/server.py] 1 import uvicorn 2 3 from ..settings import Settings 4 from ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers 5 from ..model import MLModel 6 7 from .utils import matches 8 from .app import create_app 9 from .logging import logger 10 from typing import Optional 11 12 13 class _NoSignalServer(uvicorn.Server): 14 def install_signal_handlers(self): 15 pass 16 17 18 class RESTServer: 19 def __init__( 20 self, 21 settings: Settings, 22 data_plane: DataPlane, 23 model_repository_handlers: ModelRepositoryHandlers, 24 ): 25 self._settings = settings 26 self._data_plane = data_plane 27 self._model_repository_handlers = model_repository_handlers 28 self._app = create_app( 29 self._settings, 30 data_plane=self._data_plane, 31 model_repository_handlers=self._model_repository_handlers, 32 ) 33 34 async def add_custom_handlers(self, model: MLModel) -> MLModel: 35 handlers = get_custom_handlers(model) 36 for custom_handler, handler_method in handlers: 37 self._app.add_api_route( 38 custom_handler.rest_path, 39 handler_method, 40 methods=[custom_handler.rest_method], 41 ) 42 43 return model 44 45 async def delete_custom_handlers(self, model: MLModel) -> MLModel: 46 handlers = get_custom_handlers(model) 47 if len(handlers) == 0: 48 return model 49 50 # NOTE: Loop in reverse, so that it's quicker to find all the recently 51 # added routes and we can remove routes on-the-fly 52 for i, route in reversed(list(enumerate(self._app.routes))): 53 for j, (custom_handler, handler_method) in enumerate(handlers): 54 if matches(route, custom_handler, handler_method): # type: ignore 55 self._app.routes.pop(i) 56 handlers.pop(j) 57 58 return model 59 60 async def start(self): 61 cfg = self._get_config() 62 self._server = _NoSignalServer(cfg) 63 await self._server.serve() 64 65 def _get_config(self): 66 kwargs = {} 67 68 if self._settings._custom_rest_server_settings: 69 logger.warning( 70 "REST custom configuration is out of support. Use as your own risk" 71 ) 72 kwargs.update(self._settings._custom_rest_server_settings) 73 74 kwargs.update( 75 { 76 "host": self._settings.host, 77 "port": self._settings.http_port, 78 "root_path": self._settings.root_path, 79 "access_log": self._settings.debug, 80 } 81 ) 82 83 if self._settings.logging_settings: 84 # If not None, use ours. Otherwise, let Uvicorn fall back on its 85 # own config. 
86 kwargs.update({"log_config": self._settings.logging_settings}) 87 88 return uvicorn.Config(self._app, **kwargs) 89 90 async def stop(self, sig: Optional[int] = None): 91 self._server.handle_exit(sig=sig, frame=None) 92 [end of mlserver/rest/server.py] [start of mlserver/rest/logging.py] 1 import logging 2 3 loggerName = "mlserver.rest" 4 logger = logging.getLogger(loggerName) 5 [end of mlserver/rest/logging.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlserver/rest/logging.py b/mlserver/rest/logging.py --- a/mlserver/rest/logging.py +++ b/mlserver/rest/logging.py @@ -1,4 +1,35 @@ import logging + +class HealthEndpointFilter(logging.Filter): + """ + Filter to avoid logging health endpoints. + From: + https://github.com/encode/starlette/issues/864#issuecomment-653076434 + """ + + def filter(self, record: logging.LogRecord) -> bool: + if not isinstance(record.args, tuple): + return True + + if len(record.args) < 3: + return True + + request_method = record.args[1] + query_string = record.args[2] + if request_method != "GET": + return True + + if query_string in ["/v2/health/live", "/v2/health/ready"]: + return False + + return True + + +def disable_health_access_logs() -> None: + uvicorn_logger = logging.getLogger("uvicorn.access") + uvicorn_logger.addFilter(HealthEndpointFilter()) + + loggerName = "mlserver.rest" logger = logging.getLogger(loggerName) diff --git a/mlserver/rest/server.py b/mlserver/rest/server.py --- a/mlserver/rest/server.py +++ b/mlserver/rest/server.py @@ -6,7 +6,7 @@ from .utils import matches from .app import create_app -from .logging import logger +from .logging import logger, disable_health_access_logs from typing import Optional @@ -60,6 +60,9 @@ async def start(self): cfg = self._get_config() self._server = _NoSignalServer(cfg) + if not self._settings.debug: + disable_health_access_logs() + await self._server.serve() def _get_config(self):
{"golden_diff": "diff --git a/mlserver/rest/logging.py b/mlserver/rest/logging.py\n--- a/mlserver/rest/logging.py\n+++ b/mlserver/rest/logging.py\n@@ -1,4 +1,35 @@\n import logging\n \n+\n+class HealthEndpointFilter(logging.Filter):\n+ \"\"\"\n+ Filter to avoid logging health endpoints.\n+ From:\n+ https://github.com/encode/starlette/issues/864#issuecomment-653076434\n+ \"\"\"\n+\n+ def filter(self, record: logging.LogRecord) -> bool:\n+ if not isinstance(record.args, tuple):\n+ return True\n+\n+ if len(record.args) < 3:\n+ return True\n+\n+ request_method = record.args[1]\n+ query_string = record.args[2]\n+ if request_method != \"GET\":\n+ return True\n+\n+ if query_string in [\"/v2/health/live\", \"/v2/health/ready\"]:\n+ return False\n+\n+ return True\n+\n+\n+def disable_health_access_logs() -> None:\n+ uvicorn_logger = logging.getLogger(\"uvicorn.access\")\n+ uvicorn_logger.addFilter(HealthEndpointFilter())\n+\n+\n loggerName = \"mlserver.rest\"\n logger = logging.getLogger(loggerName)\ndiff --git a/mlserver/rest/server.py b/mlserver/rest/server.py\n--- a/mlserver/rest/server.py\n+++ b/mlserver/rest/server.py\n@@ -6,7 +6,7 @@\n \n from .utils import matches\n from .app import create_app\n-from .logging import logger\n+from .logging import logger, disable_health_access_logs\n from typing import Optional\n \n \n@@ -60,6 +60,9 @@\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n+ if not self._settings.debug:\n+ disable_health_access_logs()\n+\n await self._server.serve()\n \n def _get_config(self):\n", "issue": "MLServer to hide http health request logs to avoid polluting the logs\nAs part of the Seldon Core addition https://github.com/SeldonIO/seldon-core/pull/4028 which moves the TCP ready checks into proper HTTP request ready checks to `v2/health/ready` there is now a lot of noise from the readiness checks every 5 seconds. 
We should explore ways in which we avoid this noise, perhaps making it completely silent by default, or eventually once the prometheus server is created on a separate server this could also be added (And both of them could be muted)\r\n\r\n![image](https://user-images.githubusercontent.com/1447507/161749439-3e96c417-a630-4208-80d6-5fc5f262fcd3.png)\r\n\n", "before_files": [{"content": "import uvicorn\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers\nfrom ..model import MLModel\n\nfrom .utils import matches\nfrom .app import create_app\nfrom .logging import logger\nfrom typing import Optional\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass RESTServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n self._app = create_app(\n self._settings,\n data_plane=self._data_plane,\n model_repository_handlers=self._model_repository_handlers,\n )\n\n async def add_custom_handlers(self, model: MLModel) -> MLModel:\n handlers = get_custom_handlers(model)\n for custom_handler, handler_method in handlers:\n self._app.add_api_route(\n custom_handler.rest_path,\n handler_method,\n methods=[custom_handler.rest_method],\n )\n\n return model\n\n async def delete_custom_handlers(self, model: MLModel) -> MLModel:\n handlers = get_custom_handlers(model)\n if len(handlers) == 0:\n return model\n\n # NOTE: Loop in reverse, so that it's quicker to find all the recently\n # added routes and we can remove routes on-the-fly\n for i, route in reversed(list(enumerate(self._app.routes))):\n for j, (custom_handler, handler_method) in enumerate(handlers):\n if matches(route, custom_handler, handler_method): # type: ignore\n self._app.routes.pop(i)\n handlers.pop(j)\n\n return model\n\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n await self._server.serve()\n\n def _get_config(self):\n kwargs = {}\n\n if self._settings._custom_rest_server_settings:\n logger.warning(\n \"REST custom configuration is out of support. Use as your own risk\"\n )\n kwargs.update(self._settings._custom_rest_server_settings)\n\n kwargs.update(\n {\n \"host\": self._settings.host,\n \"port\": self._settings.http_port,\n \"root_path\": self._settings.root_path,\n \"access_log\": self._settings.debug,\n }\n )\n\n if self._settings.logging_settings:\n # If not None, use ours. Otherwise, let Uvicorn fall back on its\n # own config.\n kwargs.update({\"log_config\": self._settings.logging_settings})\n\n return uvicorn.Config(self._app, **kwargs)\n\n async def stop(self, sig: Optional[int] = None):\n self._server.handle_exit(sig=sig, frame=None)\n", "path": "mlserver/rest/server.py"}, {"content": "import logging\n\nloggerName = \"mlserver.rest\"\nlogger = logging.getLogger(loggerName)\n", "path": "mlserver/rest/logging.py"}]}
1,557
425
gh_patches_debug_23720
rasdani/github-patches
git_diff
ivy-llc__ivy-22870
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> lstsq </issue> <code> [start of ivy/functional/frontends/jax/numpy/linalg.py] 1 # local 2 import ivy 3 from ivy.functional.frontends.jax import Array 4 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back 5 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes 6 from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs 7 8 9 @to_ivy_arrays_and_back 10 def cholesky(a): 11 return ivy.cholesky(a) 12 13 14 @to_ivy_arrays_and_back 15 def cond(x, p=None): 16 return ivy.cond(x, p=p) 17 18 19 @to_ivy_arrays_and_back 20 def det(a): 21 return ivy.det(a) 22 23 24 @to_ivy_arrays_and_back 25 def eig(a): 26 return ivy.eig(a) 27 28 29 @to_ivy_arrays_and_back 30 def eigh(a, UPLO="L", symmetrize_input=True): 31 def symmetrize(x): 32 # TODO : Take Hermitian transpose after complex numbers added 33 return (x + ivy.swapaxes(x, -1, -2)) / 2 34 35 if symmetrize_input: 36 a = symmetrize(a) 37 38 return ivy.eigh(a, UPLO=UPLO) 39 40 41 @to_ivy_arrays_and_back 42 def eigvals(a): 43 return ivy.eigvals(a) 44 45 46 @to_ivy_arrays_and_back 47 def eigvalsh(a, UPLO="L"): 48 return ivy.eigvalsh(a, UPLO=UPLO) 49 50 51 @to_ivy_arrays_and_back 52 def inv(a): 53 return ivy.inv(a) 54 55 56 @to_ivy_arrays_and_back 57 def matrix_power(a, n): 58 return ivy.matrix_power(a, n) 59 60 61 @to_ivy_arrays_and_back 62 def matrix_rank(M, tol=None): 63 return ivy.matrix_rank(M, atol=tol) 64 65 66 @to_ivy_arrays_and_back 67 def multi_dot(arrays, *, precision=None): 68 return ivy.multi_dot(arrays) 69 70 71 @to_ivy_arrays_and_back 72 @with_supported_dtypes( 73 {"0.4.14 and below": ("float32", "float64")}, 74 "jax", 75 ) 76 def norm(x, ord=None, axis=None, keepdims=False): 77 if ord is None: 78 ord = 2 79 if type(axis) in [list, tuple] and len(axis) == 2: 80 return Array(ivy.matrix_norm(x, ord=ord, axis=axis, keepdims=keepdims)) 81 return Array(ivy.vector_norm(x, ord=ord, axis=axis, keepdims=keepdims)) 82 83 84 @to_ivy_arrays_and_back 85 def pinv(a, rcond=None): 86 return ivy.pinv(a, rtol=rcond) 87 88 89 @to_ivy_arrays_and_back 90 def qr(a, mode="reduced"): 91 return ivy.qr(a, mode=mode) 92 93 94 @to_ivy_arrays_and_back 95 def slogdet(a, method=None): 96 return ivy.slogdet(a) 97 98 99 @to_ivy_arrays_and_back 100 def solve(a, b): 101 return ivy.solve(a, b) 102 103 104 @to_ivy_arrays_and_back 105 def svd(a, /, *, full_matrices=True, compute_uv=True, hermitian=None): 106 if not compute_uv: 107 return ivy.svdvals(a) 108 return ivy.svd(a, full_matrices=full_matrices) 109 110 111 @to_ivy_arrays_and_back 112 @with_unsupported_dtypes({"0.4.14 and below": ("float16", "bfloat16")}, "jax") 113 def tensorinv(a, ind=2): 114 old_shape = ivy.shape(a) 115 prod = 1 116 if ind > 0: 117 invshape = old_shape[ind:] + old_shape[:ind] 118 for k in old_shape[ind:]: 119 prod *= k 120 else: 121 raise ValueError("Invalid ind argument.") 122 a = ivy.reshape(a, shape=(prod, -1)) 123 ia = ivy.inv(a) 124 new_shape = tuple([*invshape]) 125 return Array(ivy.reshape(ia, shape=new_shape)) 126 127 128 @to_ivy_arrays_and_back 129 def tensorsolve(a, b, axes=None): 130 a, b = promote_types_of_jax_inputs(a, b) 131 return ivy.tensorsolve(a, b, axes=axes) 132 [end of ivy/functional/frontends/jax/numpy/linalg.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/jax/numpy/linalg.py b/ivy/functional/frontends/jax/numpy/linalg.py --- a/ivy/functional/frontends/jax/numpy/linalg.py +++ b/ivy/functional/frontends/jax/numpy/linalg.py @@ -4,6 +4,7 @@ from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs +from ivy.functional.frontends.numpy.linalg import lstsq as numpy_lstsq @to_ivy_arrays_and_back @@ -53,6 +54,23 @@ return ivy.inv(a) +# TODO: replace this with function from API +# As the composition provides numerically unstable results +@to_ivy_arrays_and_back +def lstsq(a, b, rcond=None, *, numpy_resid=False): + if numpy_resid: + return numpy_lstsq(a, b, rcond=rcond) + least_squares_solution = ivy.matmul( + ivy.pinv(a, rtol=1e-15).astype(ivy.float64), b.astype(ivy.float64) + ) + residuals = ivy.sum((b - ivy.matmul(a, least_squares_solution)) ** 2).astype( + ivy.float64 + ) + svd_values = ivy.svd(a, compute_uv=False) + rank = ivy.matrix_rank(a).astype(ivy.int32) + return (least_squares_solution, residuals, rank, svd_values[0]) + + @to_ivy_arrays_and_back def matrix_power(a, n): return ivy.matrix_power(a, n)
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/linalg.py b/ivy/functional/frontends/jax/numpy/linalg.py\n--- a/ivy/functional/frontends/jax/numpy/linalg.py\n+++ b/ivy/functional/frontends/jax/numpy/linalg.py\n@@ -4,6 +4,7 @@\n from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\n from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs\n+from ivy.functional.frontends.numpy.linalg import lstsq as numpy_lstsq\n \n \n @to_ivy_arrays_and_back\n@@ -53,6 +54,23 @@\n return ivy.inv(a)\n \n \n+# TODO: replace this with function from API\n+# As the composition provides numerically unstable results\n+@to_ivy_arrays_and_back\n+def lstsq(a, b, rcond=None, *, numpy_resid=False):\n+ if numpy_resid:\n+ return numpy_lstsq(a, b, rcond=rcond)\n+ least_squares_solution = ivy.matmul(\n+ ivy.pinv(a, rtol=1e-15).astype(ivy.float64), b.astype(ivy.float64)\n+ )\n+ residuals = ivy.sum((b - ivy.matmul(a, least_squares_solution)) ** 2).astype(\n+ ivy.float64\n+ )\n+ svd_values = ivy.svd(a, compute_uv=False)\n+ rank = ivy.matrix_rank(a).astype(ivy.int32)\n+ return (least_squares_solution, residuals, rank, svd_values[0])\n+\n+\n @to_ivy_arrays_and_back\n def matrix_power(a, n):\n return ivy.matrix_power(a, n)\n", "issue": "lstsq\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax import Array\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs\n\n\n@to_ivy_arrays_and_back\ndef cholesky(a):\n return ivy.cholesky(a)\n\n\n@to_ivy_arrays_and_back\ndef cond(x, p=None):\n return ivy.cond(x, p=p)\n\n\n@to_ivy_arrays_and_back\ndef det(a):\n return ivy.det(a)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\ndef eigh(a, UPLO=\"L\", symmetrize_input=True):\n def symmetrize(x):\n # TODO : Take Hermitian transpose after complex numbers added\n return (x + ivy.swapaxes(x, -1, -2)) / 2\n\n if symmetrize_input:\n a = symmetrize(a)\n\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eigvals(a):\n return ivy.eigvals(a)\n\n\n@to_ivy_arrays_and_back\ndef eigvalsh(a, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef inv(a):\n return ivy.inv(a)\n\n\n@to_ivy_arrays_and_back\ndef matrix_power(a, n):\n return ivy.matrix_power(a, n)\n\n\n@to_ivy_arrays_and_back\ndef matrix_rank(M, tol=None):\n return ivy.matrix_rank(M, atol=tol)\n\n\n@to_ivy_arrays_and_back\ndef multi_dot(arrays, *, precision=None):\n return ivy.multi_dot(arrays)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes(\n {\"0.4.14 and below\": (\"float32\", \"float64\")},\n \"jax\",\n)\ndef norm(x, ord=None, axis=None, keepdims=False):\n if ord is None:\n ord = 2\n if type(axis) in [list, tuple] and len(axis) == 2:\n return Array(ivy.matrix_norm(x, ord=ord, axis=axis, keepdims=keepdims))\n return Array(ivy.vector_norm(x, ord=ord, axis=axis, keepdims=keepdims))\n\n\n@to_ivy_arrays_and_back\ndef pinv(a, rcond=None):\n return ivy.pinv(a, rtol=rcond)\n\n\n@to_ivy_arrays_and_back\ndef qr(a, mode=\"reduced\"):\n return ivy.qr(a, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef slogdet(a, method=None):\n return ivy.slogdet(a)\n\n\n@to_ivy_arrays_and_back\ndef solve(a, b):\n return ivy.solve(a, b)\n\n\n@to_ivy_arrays_and_back\ndef svd(a, /, *, 
full_matrices=True, compute_uv=True, hermitian=None):\n if not compute_uv:\n return ivy.svdvals(a)\n return ivy.svd(a, full_matrices=full_matrices)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"0.4.14 and below\": (\"float16\", \"bfloat16\")}, \"jax\")\ndef tensorinv(a, ind=2):\n old_shape = ivy.shape(a)\n prod = 1\n if ind > 0:\n invshape = old_shape[ind:] + old_shape[:ind]\n for k in old_shape[ind:]:\n prod *= k\n else:\n raise ValueError(\"Invalid ind argument.\")\n a = ivy.reshape(a, shape=(prod, -1))\n ia = ivy.inv(a)\n new_shape = tuple([*invshape])\n return Array(ivy.reshape(ia, shape=new_shape))\n\n\n@to_ivy_arrays_and_back\ndef tensorsolve(a, b, axes=None):\n a, b = promote_types_of_jax_inputs(a, b)\n return ivy.tensorsolve(a, b, axes=axes)\n", "path": "ivy/functional/frontends/jax/numpy/linalg.py"}]}
1,794
405
gh_patches_debug_10802
rasdani/github-patches
git_diff
interlegis__sapl-2147
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Audiências Públicas sem possibilidade de Edição Ao criar uma Audiência Pública e salva-la, não aparecem os metadados da matéria legislativa inseridas no preenchimento. Ao clicar em Editar, só aparece o título da audiência criada. grato </issue> <code> [start of sapl/audiencia/views.py] 1 from django.http import HttpResponse 2 from django.views.decorators.clickjacking import xframe_options_exempt 3 from django.views.generic import UpdateView 4 from sapl.crud.base import RP_DETAIL, RP_LIST, Crud 5 6 from .forms import AudienciaForm 7 from .models import AudienciaPublica 8 9 10 def index(request): 11 return HttpResponse("Audiência Pública") 12 13 14 class AudienciaCrud(Crud): 15 model = AudienciaPublica 16 public = [RP_LIST, RP_DETAIL, ] 17 18 class BaseMixin(Crud.BaseMixin): 19 list_field_names = ['numero', 'nome', 'tipo', 'materia', 20 'data'] 21 ordering = 'nome', 'numero', 'tipo', 'data' 22 23 class ListView(Crud.ListView): 24 paginate_by = 10 25 26 class CreateView(Crud.CreateView): 27 form_class = AudienciaForm 28 29 def form_valid(self, form): 30 return super(Crud.CreateView, self).form_valid(form) 31 32 class UpdateView(Crud.UpdateView): 33 form_class = AudienciaForm 34 35 def get_initial(self): 36 initial = super(UpdateView, self).get_initial() 37 initial['tipo_materia'] = self.object.materia.tipo.id 38 initial['numero_materia'] = self.object.materia.numero 39 initial['ano_materia'] = self.object.materia.ano 40 return initial 41 42 class DeleteView(Crud.DeleteView): 43 pass 44 45 class DetailView(Crud.DetailView): 46 47 layout_key = 'AudienciaPublicaDetail' 48 49 @xframe_options_exempt 50 def get(self, request, *args, **kwargs): 51 return super().get(request, *args, **kwargs) 52 53 [end of sapl/audiencia/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py --- a/sapl/audiencia/views.py +++ b/sapl/audiencia/views.py @@ -34,9 +34,10 @@ def get_initial(self): initial = super(UpdateView, self).get_initial() - initial['tipo_materia'] = self.object.materia.tipo.id - initial['numero_materia'] = self.object.materia.numero - initial['ano_materia'] = self.object.materia.ano + if self.object.materia: + initial['tipo_materia'] = self.object.materia.tipo.id + initial['numero_materia'] = self.object.materia.numero + initial['ano_materia'] = self.object.materia.ano return initial class DeleteView(Crud.DeleteView):
{"golden_diff": "diff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py\n--- a/sapl/audiencia/views.py\n+++ b/sapl/audiencia/views.py\n@@ -34,9 +34,10 @@\n \n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n- initial['tipo_materia'] = self.object.materia.tipo.id\n- initial['numero_materia'] = self.object.materia.numero\n- initial['ano_materia'] = self.object.materia.ano\n+ if self.object.materia:\n+ initial['tipo_materia'] = self.object.materia.tipo.id\n+ initial['numero_materia'] = self.object.materia.numero\n+ initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n", "issue": "Audi\u00eancias P\u00fablicas sem possibilidade de Edi\u00e7\u00e3o\nAo criar uma Audi\u00eancia P\u00fablica e salva-la, n\u00e3o aparecem os metadados da mat\u00e9ria legislativa inseridas no preenchimento. \r\nAo clicar em Editar, s\u00f3 aparece o t\u00edtulo da audi\u00eancia criada.\r\ngrato\n", "before_files": [{"content": "from django.http import HttpResponse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import UpdateView\nfrom sapl.crud.base import RP_DETAIL, RP_LIST, Crud\n\nfrom .forms import AudienciaForm\nfrom .models import AudienciaPublica\n\n\ndef index(request):\n return HttpResponse(\"Audi\u00eancia P\u00fablica\")\n\n\nclass AudienciaCrud(Crud):\n model = AudienciaPublica\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['numero', 'nome', 'tipo', 'materia',\n 'data']\n ordering = 'nome', 'numero', 'tipo', 'data'\n\n class ListView(Crud.ListView):\n paginate_by = 10\n\n class CreateView(Crud.CreateView):\n form_class = AudienciaForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = AudienciaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n initial['tipo_materia'] = self.object.materia.tipo.id\n initial['numero_materia'] = self.object.materia.numero\n initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n pass\n\n class DetailView(Crud.DetailView):\n\n layout_key = 'AudienciaPublicaDetail'\n\n @xframe_options_exempt\n def get(self, request, *args, **kwargs):\n return super().get(request, *args, **kwargs)\n\n ", "path": "sapl/audiencia/views.py"}]}
1,067
189